diff --git a/src/go.mod b/src/go.mod index f7103acb8..751bea103 100644 --- a/src/go.mod +++ b/src/go.mod @@ -2,33 +2,33 @@ module code.cloudfoundry.org/loggregator-agent-release/src go 1.22.0 -toolchain go1.22.9 +toolchain go1.22.10 require ( - code.cloudfoundry.org/go-batching v0.0.0-20241125161418-1c4c43ed1a1e - code.cloudfoundry.org/go-diodes v0.0.0-20241125060457-612558937770 + code.cloudfoundry.org/go-batching v0.0.0-20241223135445-b8f45a03eb70 + code.cloudfoundry.org/go-diodes v0.0.0-20241223074059-7f8c1f03edeb code.cloudfoundry.org/go-envstruct v1.7.0 - code.cloudfoundry.org/go-metric-registry v0.0.0-20241125210533-4270b99b04f2 - code.cloudfoundry.org/tlsconfig v0.10.0 + code.cloudfoundry.org/go-metric-registry v0.0.0-20241216212740-f99815f94a2a + code.cloudfoundry.org/tlsconfig v0.13.0 github.com/cloudfoundry/dropsonde v1.1.0 - github.com/cloudfoundry/sonde-go v0.0.0-20241016180203-3c0e1c24e908 - github.com/onsi/gomega v1.36.0 + github.com/cloudfoundry/sonde-go v0.0.0-20241223073712-8666f251e654 + github.com/onsi/gomega v1.36.1 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.60.1 - github.com/valyala/fasthttp v1.57.0 - golang.org/x/net v0.31.0 - google.golang.org/grpc v1.68.0-dev + github.com/prometheus/common v0.61.0 + github.com/valyala/fasthttp v1.58.0 + golang.org/x/net v0.33.0 + google.golang.org/grpc v1.69.2 gopkg.in/yaml.v2 v2.4.0 ) require ( code.cloudfoundry.org/go-loggregator/v9 v9.2.1 git.sr.ht/~nelsam/hel/v3 v3.0.4 - github.com/go-chi/chi/v5 v5.1.0 + github.com/go-chi/chi/v5 v5.2.0 github.com/google/go-cmp v0.6.0 - github.com/onsi/ginkgo/v2 v2.22.0 + github.com/onsi/ginkgo/v2 v2.22.1 go.opentelemetry.io/proto/otlp v1.3.1 - google.golang.org/protobuf v1.35.2 + google.golang.org/protobuf v1.36.1 ) require ( @@ -38,8 +38,8 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/google/pprof v0.0.0-20241128161848-dc51965c6481 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/klauspost/compress v1.17.11 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -50,14 +50,14 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/square/certstrap v1.3.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect - go.step.sm/crypto v0.54.2 // indirect - golang.org/x/crypto v0.29.0 // indirect + go.step.sm/crypto v0.56.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/sync v0.9.0 // indirect - golang.org/x/sys v0.27.0 // indirect - golang.org/x/text v0.20.0 // indirect - golang.org/x/tools v0.27.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/tools v0.28.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241223144023-3abc09e42ca8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/src/go.sum b/src/go.sum index 
0f3765fc8..03e16aa4a 100644 --- a/src/go.sum +++ b/src/go.sum @@ -1,16 +1,16 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -code.cloudfoundry.org/go-batching v0.0.0-20241125161418-1c4c43ed1a1e h1:bvTSt9QtJCpka5EyJiiDY68DGRH5oy4kab1Etyb8IkU= -code.cloudfoundry.org/go-batching v0.0.0-20241125161418-1c4c43ed1a1e/go.mod h1:Y89Ijl3rCUvYxt3OvriVkoFgJhibutvN7wtiPiICNr0= -code.cloudfoundry.org/go-diodes v0.0.0-20241125060457-612558937770 h1:mIdbJ1bQpJ/EboT42Hz2g2e79yDns1nKYoNB01T+BO0= -code.cloudfoundry.org/go-diodes v0.0.0-20241125060457-612558937770/go.mod h1:LUuklsZ+vM4POrCPtgmPssFlsId1zYprvC0jWmhQghw= +code.cloudfoundry.org/go-batching v0.0.0-20241223135445-b8f45a03eb70 h1:QrpiPn7qAJPGoXoKC6ikaSh/1YjLVobq2dl2gTUlFKg= +code.cloudfoundry.org/go-batching v0.0.0-20241223135445-b8f45a03eb70/go.mod h1:PMiuvMB9vmOM0CiItT9JBT9s7g1FEEZzE/ZoVRqFFAc= +code.cloudfoundry.org/go-diodes v0.0.0-20241223074059-7f8c1f03edeb h1:m41VJyn39E0pK9grfNTiC/v+mWXk0AbZaICCtp8OSEA= +code.cloudfoundry.org/go-diodes v0.0.0-20241223074059-7f8c1f03edeb/go.mod h1:aCl3KLU5akt7Fz1do5pi0T48hEmqouEtn68gZiIC/S4= code.cloudfoundry.org/go-envstruct v1.7.0 h1:6e90Z+lTPrI2jMJOoKErskL7a+Qm97GFiithcXeVWdk= code.cloudfoundry.org/go-envstruct v1.7.0/go.mod h1:xm6Eto/WB7Qq1iwEN29jUQXDeUCJ+nruwAnnrpY0s4E= code.cloudfoundry.org/go-loggregator/v9 v9.2.1 h1:S6Lgg5UJbhh2bt2TGQxs6R00CF8PrUA3GFPYDxy56Fk= code.cloudfoundry.org/go-loggregator/v9 v9.2.1/go.mod h1:FTFFruqGeOhVCDFvyLgl8EV8YW63NNwRzLhxJcporu8= -code.cloudfoundry.org/go-metric-registry v0.0.0-20241125210533-4270b99b04f2 h1:wA7x68A8jBX8RmK9bZ+qQoQyZHBvjbS3AmThfhsTebM= -code.cloudfoundry.org/go-metric-registry v0.0.0-20241125210533-4270b99b04f2/go.mod h1:p+Hdz8bhE7yQpg1mge8dwUOvYG280v+/wMx5BjVCxz4= -code.cloudfoundry.org/tlsconfig v0.10.0 h1:IGbLsOfQmzjI1o5bcGtsTwsZl/Qoz/n5/NkBK68Lk1I= -code.cloudfoundry.org/tlsconfig v0.10.0/go.mod h1:Rz8UI6zoOnB9UFxYfPpZhAsl2hDsmjmVW9SGmUhsDSk= +code.cloudfoundry.org/go-metric-registry v0.0.0-20241216212740-f99815f94a2a h1:o0CYJ5Xpd84l+nIV7OFcTrUS0w/xm/G5ZCvqhVNA7Ao= +code.cloudfoundry.org/go-metric-registry v0.0.0-20241216212740-f99815f94a2a/go.mod h1:LQEPfezhF2NsdnSxnBCmJm1qacL7+3+Fe9A/kLn7l5s= +code.cloudfoundry.org/tlsconfig v0.13.0 h1:3SL/zM7qWWHea5gA2S0+dV7qGRhdkET3lnyAB0CYNtM= +code.cloudfoundry.org/tlsconfig v0.13.0/go.mod h1:1gRXjcf4Af14MDfTOlF+2//hYRF14/Ki1zxz/txa+cY= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= git.sr.ht/~nelsam/hel/v3 v3.0.4 h1:ElleA4q9XHTskFod5T7cC4oXOULo41jKRjYijTIlJgw= @@ -34,8 +34,8 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudfoundry/dropsonde v1.1.0 h1:nerhj8K0heOsv/U/Ddou5Esw56YlNeHHJH6MP9QlACQ= github.com/cloudfoundry/dropsonde v1.1.0/go.mod h1:OrkxsBrAvM8X0Ve9vaSNKLR+/Jeohu3+J0M4JEaTmnM= -github.com/cloudfoundry/sonde-go v0.0.0-20241016180203-3c0e1c24e908 h1:hTZbUjHHExhpFrZxaG/yv87J5JT3Hr6tlLqiFXruUJg= -github.com/cloudfoundry/sonde-go v0.0.0-20241016180203-3c0e1c24e908/go.mod h1:SiB/qvUutgBWlAEuiQLG3M+hEBeRHdNlMGq1YYjFo90= +github.com/cloudfoundry/sonde-go v0.0.0-20241223073712-8666f251e654 h1:onI+CvuqU0lSGWGSA5P/fXD+ZytbSif1ldH6br3BFk0= +github.com/cloudfoundry/sonde-go v0.0.0-20241223073712-8666f251e654/go.mod h1:UDvvOsijSP803XSgRBzLPvMcBelvsmXyfQR+ezpwfg0= github.com/coreos/bbolt v1.3.2/go.mod 
h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -50,13 +50,15 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw= -github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-chi/chi/v5 v5.2.0 h1:Aj1EtB0qR2Rdo2dG4O94RIU35w2lvQSj6BRA4+qwFL0= +github.com/go-chi/chi/v5 v5.2.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -67,18 +69,22 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/pprof v0.0.0-20241128161848-dc51965c6481 h1:yudKIrXagAOl99WQzrP1gbz5HLB9UjhcOFnPzdd6Qec= -github.com/google/pprof v0.0.0-20241128161848-dc51965c6481/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -114,10 +120,10 @@ github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= -github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y= -github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= +github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -137,8 +143,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= @@ -170,14 +176,14 @@ github.com/square/certstrap v1.3.0 h1:N9P0ZRA+DjT8pq5fGDj0z3FjafRKnBDypP0QHpMlaA github.com/square/certstrap v1.3.0/go.mod h1:wGZo9eE1B7WX2GKBn0htJ+B3OuRl2UsdCFySNooy9hU= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.57.0 h1:Xw8SjWGEP/+wAAgyy5XTvgrWlOD1+TxbbvNADYCm1Tg= -github.com/valyala/fasthttp v1.57.0/go.mod h1:h6ZBaPRlzpZ6O3H5t2gEk1Qi33+TmLvfwgLLp0t9CpE= +github.com/valyala/fasthttp v1.58.0 h1:GGB2dWxSbEprU9j0iMJHgdKYJVDyjrOwF9RE59PbRuE= +github.com/valyala/fasthttp v1.58.0/go.mod h1:SYXvHHaFp7QZHGKSHmoMipInhrI5StHrhDTYVEjK/Kw= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= @@ -185,10 +191,20 @@ github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3i github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.step.sm/crypto v0.54.2 h1:3LSA5nYDQvcd484OSx7xsS3XDqQ7/WZjVqvq0+a0fWc= -go.step.sm/crypto v0.54.2/go.mod h1:1+OjUozd5aA3TkBJfr5Aobd6vNt9F70n1DagcoBh3Pc= +go.step.sm/crypto v0.56.0 h1:KcFfV76cI9Xaw8bdSc9x55skyuSdcHcTdL37vvVZnvY= +go.step.sm/crypto v0.56.0/go.mod h1:snWNloxY9s1W+HsFqcviq55nvzbqqX6LxVt0Vktv5mw= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -196,8 +212,8 @@ 
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -213,8 +229,8 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -222,8 +238,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -233,13 +249,13 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -247,24 +263,24 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200922162830-aeb8e3692911/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= -golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 h1:pgr/4QbFyktUv9CtQ/Fq4gzEE6/Xs7iCXbktaGzLHbQ= -google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697/go.mod h1:+D9ySVjN8nY8YCVjc5O7PZDIdZporIDY3KaGfJunh88= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 h1:LWZqQOEjDyONlF1H6afSWpAL/znlREo2tHfLoe+8LMA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto/googleapis/api v0.0.0-20241223144023-3abc09e42ca8 h1:st3LcW/BPi75W4q1jJTEor/QWwbNlPlDG0JTn6XhZu0= +google.golang.org/genproto/googleapis/api v0.0.0-20241223144023-3abc09e42ca8/go.mod 
h1:klhJGKFyG8Tn50enBn7gizg4nXGXJ+jqEREdCWaPcV4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.68.0-dev h1:Qao/m2HpklhJt2QbpdRutxyNfRuwM8nGPpmi2UkuEHw= -google.golang.org/grpc v1.68.0-dev/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/src/vendor/github.com/go-chi/chi/v5/README.md b/src/vendor/github.com/go-chi/chi/v5/README.md index 21cbb0fc0..7f662ab45 100644 --- a/src/vendor/github.com/go-chi/chi/v5/README.md +++ b/src/vendor/github.com/go-chi/chi/v5/README.md @@ -65,7 +65,7 @@ func main() { **REST Preview:** -Here is a little preview of how routing looks like with chi. Also take a look at the generated routing docs +Here is a little preview of what routing looks like with chi. Also take a look at the generated routing docs in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)). @@ -469,7 +469,8 @@ how setting context on a request in Go works. * Carl Jackson for https://github.com/zenazn/goji * Parts of chi's thinking comes from goji, and chi's middleware package - sources from goji. + sources from [goji](https://github.com/zenazn/goji/tree/master/web/middleware). + * Please see goji's [LICENSE](https://github.com/zenazn/goji/blob/master/LICENSE) (MIT) * Armon Dadgar for https://github.com/armon/go-radix * Contributions: [@VojtechVitek](https://github.com/VojtechVitek) diff --git a/src/vendor/github.com/go-chi/chi/v5/chi.go b/src/vendor/github.com/go-chi/chi/v5/chi.go index a1691bbeb..fc32c4efe 100644 --- a/src/vendor/github.com/go-chi/chi/v5/chi.go +++ b/src/vendor/github.com/go-chi/chi/v5/chi.go @@ -51,7 +51,7 @@ // "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/" // "/user/{name}/info" matches "/user/jsmith/info" // "/page/*" matches "/page/intro/latest" -// "/page/{other}/index" also matches "/page/intro/latest" +// "/page/{other}/latest" also matches "/page/intro/latest" // "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01" package chi @@ -127,6 +127,10 @@ type Routes interface { // the method/path - similar to routing a http request, but without // executing the handler thereafter. 
Match(rctx *Context, method, path string) bool + + // Find searches the routing tree for the pattern that matches + // the method/path. + Find(rctx *Context, method, path string) string } // Middlewares type is a slice of standard middleware handlers with methods diff --git a/src/vendor/github.com/go-chi/chi/v5/context.go b/src/vendor/github.com/go-chi/chi/v5/context.go index e2cd908d7..aacf6eff7 100644 --- a/src/vendor/github.com/go-chi/chi/v5/context.go +++ b/src/vendor/github.com/go-chi/chi/v5/context.go @@ -109,7 +109,7 @@ func (x *Context) URLParam(key string) string { // RoutePattern builds the routing pattern string for the particular // request, at the particular point during routing. This means, the value // will change throughout the execution of a request in a router. That is -// why its advised to only use this value after calling the next handler. +// why it's advised to only use this value after calling the next handler. // // For example, // @@ -121,6 +121,9 @@ func (x *Context) URLParam(key string) string { // }) // } func (x *Context) RoutePattern() string { + if x == nil { + return "" + } routePattern := strings.Join(x.RoutePatterns, "") routePattern = replaceWildcards(routePattern) if routePattern != "/" { diff --git a/src/vendor/github.com/go-chi/chi/v5/mux.go b/src/vendor/github.com/go-chi/chi/v5/mux.go index 6dc1904d9..91daf691e 100644 --- a/src/vendor/github.com/go-chi/chi/v5/mux.go +++ b/src/vendor/github.com/go-chi/chi/v5/mux.go @@ -363,19 +363,40 @@ func (mx *Mux) Middlewares() Middlewares { // Note: the *Context state is updated during execution, so manage // the state carefully or make a NewRouteContext(). func (mx *Mux) Match(rctx *Context, method, path string) bool { + return mx.Find(rctx, method, path) != "" +} + +// Find searches the routing tree for the pattern that matches +// the method/path. +// +// Note: the *Context state is updated during execution, so manage +// the state carefully or make a NewRouteContext(). 
+func (mx *Mux) Find(rctx *Context, method, path string) string { m, ok := methodMap[method] if !ok { - return false + return "" } - node, _, h := mx.tree.FindRoute(rctx, m, path) + node, _, _ := mx.tree.FindRoute(rctx, m, path) + pattern := rctx.routePattern + + if node != nil { + if node.subroutes == nil { + e := node.endpoints[m] + return e.pattern + } - if node != nil && node.subroutes != nil { rctx.RoutePath = mx.nextRoutePath(rctx) - return node.subroutes.Match(rctx, method, rctx.RoutePath) + subPattern := node.subroutes.Find(rctx, method, rctx.RoutePath) + if subPattern == "" { + return "" + } + + pattern = strings.TrimSuffix(pattern, "/*") + pattern += subPattern } - return h != nil + return pattern } // NotFoundHandler returns the default Mux 404 responder whenever a route diff --git a/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 9f50a569e..0fa907656 100644 --- a/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -64,7 +64,13 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal } if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(respRw)) + var contentType string + if sct, ok := marshaler.(StreamContentType); ok { + contentType = sct.StreamContentType(respRw) + } else { + contentType = marshaler.ContentType(respRw) + } + w.Header().Set("Content-Type", contentType) } var buf []byte @@ -194,7 +200,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha w.Header().Set("Content-Length", strconv.Itoa(len(buf))) } - if _, err = w.Write(buf); err != nil { + if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) { grpclog.Errorf("Failed to write response: %v", err) } diff --git a/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go index 2c0d25ff4..b1dfc37af 100644 --- a/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go +++ b/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -48,3 +48,11 @@ type Delimited interface { // Delimiter returns the record separator for the stream. Delimiter() []byte } + +// StreamContentType defines the streaming content type. +type StreamContentType interface { + // StreamContentType returns the content type for a stream. This shares the + // same behaviour as for `Marshaler.ContentType`, but is called, if present, + // in the case of a streamed response. 
+ StreamContentType(v interface{}) string +} diff --git a/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index fe634174b..0a1ca7e06 100644 --- a/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/src/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -141,7 +141,7 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin } // Check if oneof already set - if of := fieldDescriptor.ContainingOneof(); of != nil { + if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() { if f := msgValue.WhichOneof(of); f != nil { return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) } @@ -291,7 +291,11 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return protoreflect.Value{}, err } - msg = timestamppb.New(t) + timestamp := timestamppb.New(t) + if ok := timestamp.IsValid(); !ok { + return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value) + } + msg = timestamp case "google.protobuf.Duration": d, err := time.ParseDuration(value) if err != nil { diff --git a/src/vendor/github.com/onsi/ginkgo/v2/.gitignore b/src/vendor/github.com/onsi/ginkgo/v2/.gitignore index 18793c248..6faaaf315 100644 --- a/src/vendor/github.com/onsi/ginkgo/v2/.gitignore +++ b/src/vendor/github.com/onsi/ginkgo/v2/.gitignore @@ -4,4 +4,5 @@ tmp/**/* *.coverprofile .vscode .idea/ -*.log \ No newline at end of file +*.log +*.test \ No newline at end of file diff --git a/src/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/src/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index fd6e070c1..0c29708d2 100644 --- a/src/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/src/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,16 @@ +## 2.22.1 + +### Fixes +Fix CSV encoding +- Update tests [aab3da6] +- Properly encode CSV rows [c09df39] +- Add test case for proper csv escaping [96a80fc] +- Add meta-test [43dad69] + +### Maintenance +- ensure *.test files are gitignored so we don't accidentally commit compiled tests again [c88c634] +- remove golang.org/x/net/context in favour of stdlib context [4df44bf] + ## 2.22.0 ### Features diff --git a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go index c2327cda8..e99d557d1 100644 --- a/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go +++ b/src/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -1,10 +1,13 @@ package outline import ( + "bytes" + "encoding/csv" "encoding/json" "fmt" "go/ast" "go/token" + "strconv" "strings" "golang.org/x/tools/go/ast/inspector" @@ -84,9 +87,11 @@ func (o *outline) String() string { // StringIndent returns a CSV-formated outline, but every line is indented by // one 'width' of spaces for every level of nesting. 
func (o *outline) StringIndent(width int) string { - var b strings.Builder + var b bytes.Buffer b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n") + csvWriter := csv.NewWriter(&b) + currentIndent := 0 pre := func(n *ginkgoNode) { b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) @@ -96,8 +101,22 @@ func (o *outline) StringIndent(width int) string { } else { labels = strings.Join(n.Labels, ", ") } - //enclosing labels in a double quoted comma separate listed so that when inmported into a CSV app the Labels column has comma separate strings - b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels)) + + row := []string{ + n.Name, + n.Text, + strconv.Itoa(n.Start), + strconv.Itoa(n.End), + strconv.FormatBool(n.Spec), + strconv.FormatBool(n.Focused), + strconv.FormatBool(n.Pending), + labels, + } + csvWriter.Write(row) + + // Ensure we write to `b' before the next `b.WriteString()', which might be adding indentation + csvWriter.Flush() + currentIndent += width } post := func(n *ginkgoNode) { @@ -106,5 +125,6 @@ func (o *outline) StringIndent(width int) string { for _, n := range o.Nodes { n.Walk(pre, post) } + return b.String() } diff --git a/src/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/src/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index a3c9e6bf1..3edf50776 100644 --- a/src/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/src/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -1,6 +1,7 @@ package internal import ( + "context" "fmt" "sync" "time" @@ -9,7 +10,6 @@ import ( "github.com/onsi/ginkgo/v2/internal/parallel_support" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" - "golang.org/x/net/context" ) type Phase uint @@ -20,7 +20,7 @@ const ( PhaseRun ) -var PROGRESS_REPORTER_DEADLING = 5 * time.Second +const ProgressReporterDeadline = 5 * time.Second type Suite struct { tree *TreeNode @@ -370,7 +370,7 @@ func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport suite.selectiveLock.Lock() defer suite.selectiveLock.Unlock() - deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING) + deadline, cancel := context.WithTimeout(context.Background(), ProgressReporterDeadline) defer cancel() var additionalReports []string if suite.currentSpecContext != nil { diff --git a/src/vendor/github.com/onsi/ginkgo/v2/types/version.go b/src/vendor/github.com/onsi/ginkgo/v2/types/version.go index 0b51c0b56..501af2fd9 100644 --- a/src/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/src/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.22.0" +const VERSION = "2.22.1" diff --git a/src/vendor/github.com/onsi/gomega/CHANGELOG.md b/src/vendor/github.com/onsi/gomega/CHANGELOG.md index b797577ff..79c3f6199 100644 --- a/src/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/src/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,9 @@ +## 1.36.1 + +### Fixes +- Fix https://github.com/onsi/gomega/issues/803 [1c6c112] +- resolves onsi/gomega#696: make HaveField great on pointer receivers given only a non-addressable value [4feb9d7] + ## 1.36.0 ### Features diff --git a/src/vendor/github.com/onsi/gomega/gomega_dsl.go b/src/vendor/github.com/onsi/gomega/gomega_dsl.go index eb74f6f6a..c6ac499f7 100644 --- a/src/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/src/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) 
-const GOMEGA_VERSION = "1.36.0" +const GOMEGA_VERSION = "1.36.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/src/vendor/github.com/onsi/gomega/matchers/have_field.go b/src/vendor/github.com/onsi/gomega/matchers/have_field.go index 8dd3f871a..293457e85 100644 --- a/src/vendor/github.com/onsi/gomega/matchers/have_field.go +++ b/src/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -40,7 +40,12 @@ func extractField(actual interface{}, field string, matchername string) (any, er extractedValue = actualValue.Addr().MethodByName(strings.TrimSuffix(fields[0], "()")) } if extractedValue == (reflect.Value{}) { - return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual)) + ptr := reflect.New(actualValue.Type()) + ptr.Elem().Set(actualValue) + extractedValue = ptr.MethodByName(strings.TrimSuffix(fields[0], "()")) + if extractedValue == (reflect.Value{}) { + return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual)) + } } t := extractedValue.Type() if t.NumIn() != 0 || t.NumOut() != 1 { diff --git a/src/vendor/github.com/prometheus/common/expfmt/encode.go b/src/vendor/github.com/prometheus/common/expfmt/encode.go index cf0c150c2..d7f3d76f5 100644 --- a/src/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/src/vendor/github.com/prometheus/common/expfmt/encode.go @@ -68,7 +68,7 @@ func Negotiate(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: // If the escaping parameter is unknown, ignore it. } @@ -101,7 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: // If the escaping parameter is unknown, ignore it. 
} diff --git a/src/vendor/github.com/prometheus/common/expfmt/expfmt.go b/src/vendor/github.com/prometheus/common/expfmt/expfmt.go index d942af8ed..b26886560 100644 --- a/src/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/src/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,7 +15,7 @@ package expfmt import ( - "fmt" + "errors" "strings" "github.com/prometheus/common/model" @@ -109,7 +109,7 @@ func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_1_0_0 { return FmtOpenMetrics_1_0_0, nil } - return FmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, errors.New("unknown open metrics version string") } // WithEscapingScheme returns a copy of Format with the specified escaping diff --git a/src/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/src/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 11c8ff4b9..f1c495dd6 100644 --- a/src/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/src/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -152,8 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { compliantName = name[:len(name)-6] } - if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { - compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) { + compliantName = compliantName + "_" + *in.Unit } // Comments, first HELP, then TYPE. diff --git a/src/vendor/github.com/prometheus/common/expfmt/text_parse.go b/src/vendor/github.com/prometheus/common/expfmt/text_parse.go index f085a923f..b4607fe4d 100644 --- a/src/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/src/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -895,7 +895,7 @@ func histogramMetricName(name string) string { func parseFloat(s string) (float64, error) { if strings.ContainsAny(s, "pP_") { - return 0, fmt.Errorf("unsupported character in float") + return 0, errors.New("unsupported character in float") } return strconv.ParseFloat(s, 64) } diff --git a/src/vendor/github.com/prometheus/common/model/alert.go b/src/vendor/github.com/prometheus/common/model/alert.go index 80d1fe944..bd3a39e3e 100644 --- a/src/vendor/github.com/prometheus/common/model/alert.go +++ b/src/vendor/github.com/prometheus/common/model/alert.go @@ -14,6 +14,7 @@ package model import ( + "errors" "fmt" "time" ) @@ -89,16 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus { // Validate checks whether the alert data is inconsistent. 
func (a *Alert) Validate() error { if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if err := a.Labels.Validate(); err != nil { return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") + return errors.New("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { return fmt.Errorf("invalid annotations: %w", err) diff --git a/src/vendor/github.com/prometheus/common/model/metric.go b/src/vendor/github.com/prometheus/common/model/metric.go index f50966bc4..0daca836a 100644 --- a/src/vendor/github.com/prometheus/common/model/metric.go +++ b/src/vendor/github.com/prometheus/common/model/metric.go @@ -14,9 +14,11 @@ package model import ( + "errors" "fmt" "regexp" "sort" + "strconv" "strings" "unicode/utf8" @@ -269,10 +271,6 @@ func metricNeedsEscaping(m *dto.Metric) bool { return false } -const ( - lowerhex = "0123456789abcdef" -) - // EscapeName escapes the incoming name according to the provided escaping // scheme. Depending on the rules of escaping, this may cause no change in the // string that is returned. (Especially NoEscaping, which by definition is a @@ -307,7 +305,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else { - escaped.WriteRune('_') + escaped.WriteString("__") } } return escaped.String() @@ -317,21 +315,15 @@ func EscapeName(name string, scheme EscapingScheme) string { } escaped.WriteString("U__") for i, b := range name { - if isValidLegacyRune(b, i) { + if b == '_' { + escaped.WriteString("__") + } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else if !utf8.ValidRune(b) { escaped.WriteString("_FFFD_") - } else if b < 0x100 { - escaped.WriteRune('_') - for s := 4; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } - escaped.WriteRune('_') - } else if b < 0x10000 { + } else { escaped.WriteRune('_') - for s := 12; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } + escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') } } @@ -389,8 +381,9 @@ func UnescapeName(name string, scheme EscapingScheme) string { // We think we are in a UTF-8 code, process it. var utf8Val uint for j := 0; i < len(escapedName); j++ { - // This is too many characters for a utf8 value. - if j > 4 { + // This is too many characters for a utf8 value based on the MaxRune + // value of '\U0010FFFF'. + if j >= 6 { return name } // Found a closing underscore, convert to a rune, check validity, and append. 
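// A minimal, illustrative sketch (not part of this patch) of the value-encoding
// escaping behaviour changed in the hunks above. It assumes the exported
// EscapeName/UnescapeName helpers and the ValueEncodingEscaping scheme from
// github.com/prometheus/common/model; the outputs shown just follow the rules above.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// "my.metric/name" is not a legacy-valid name, so it gets the "U__" prefix;
	// '.' and '/' are written as _<hex>_ (_2e_, _2f_), and a literal '_' would
	// now become "__".
	escaped := model.EscapeName("my.metric/name", model.ValueEncodingEscaping)
	fmt.Println(escaped) // U__my_2e_metric_2f_name

	// UnescapeName reverses the transformation; the new j >= 6 bound above caps
	// the hex run at the width needed for utf8.MaxRune ('\U0010FFFF').
	fmt.Println(model.UnescapeName(escaped, model.ValueEncodingEscaping)) // my.metric/name
}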
@@ -443,7 +436,7 @@ func (e EscapingScheme) String() string { func ToEscapingScheme(s string) (EscapingScheme, error) { if s == "" { - return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + return NoEscaping, errors.New("got empty string instead of escaping scheme") } switch s { case AllowUTF8: diff --git a/src/vendor/github.com/prometheus/common/model/silence.go b/src/vendor/github.com/prometheus/common/model/silence.go index 910b0b71f..8f91a9702 100644 --- a/src/vendor/github.com/prometheus/common/model/silence.go +++ b/src/vendor/github.com/prometheus/common/model/silence.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "regexp" "time" @@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error { } if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") + return errors.New("label name in matcher must not be empty") } if m.IsRegex { if _, err := regexp.Compile(m.Value); err != nil { @@ -77,7 +78,7 @@ type Silence struct { // Validate returns true iff all fields of the silence have valid values. func (s *Silence) Validate() error { if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") + return errors.New("at least one matcher required") } for _, m := range s.Matchers { if err := m.Validate(); err != nil { @@ -85,22 +86,22 @@ func (s *Silence) Validate() error { } } if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") + return errors.New("end time missing") } if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") + return errors.New("creator information missing") } if s.Comment == "" { - return fmt.Errorf("comment missing") + return errors.New("comment missing") } if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") + return errors.New("creation timestamp missing") } return nil } diff --git a/src/vendor/github.com/prometheus/common/model/value_float.go b/src/vendor/github.com/prometheus/common/model/value_float.go index ae35cc2ab..6bfc757d1 100644 --- a/src/vendor/github.com/prometheus/common/model/value_float.go +++ b/src/vendor/github.com/prometheus/common/model/value_float.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "math" "strconv" @@ -39,7 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) { // UnmarshalJSON implements json.Unmarshaler. 
func (v *SampleValue) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") + return errors.New("sample value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { diff --git a/src/vendor/github.com/prometheus/common/model/value_histogram.go b/src/vendor/github.com/prometheus/common/model/value_histogram.go index 54bb038cf..895e6a3e8 100644 --- a/src/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/src/vendor/github.com/prometheus/common/model/value_histogram.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -32,7 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) { func (v *FloatString) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("float value must be a quoted string") + return errors.New("float value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { @@ -141,7 +142,7 @@ type SampleHistogramPair struct { func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { if s.Histogram == nil { - return nil, fmt.Errorf("histogram is nil") + return nil, errors.New("histogram is nil") } t, err := json.Marshal(s.Timestamp) if err != nil { @@ -164,7 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) } if s.Histogram == nil { - return fmt.Errorf("histogram is null") + return errors.New("histogram is null") } return nil } diff --git a/src/vendor/github.com/valyala/fasthttp/.golangci.yml b/src/vendor/github.com/valyala/fasthttp/.golangci.yml index 19a288913..dc8c148c5 100644 --- a/src/vendor/github.com/valyala/fasthttp/.golangci.yml +++ b/src/vendor/github.com/valyala/fasthttp/.golangci.yml @@ -65,6 +65,7 @@ linters-settings: revive: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md rules: + - name: indent-error-flow - name: use-any lll: line-length: 130 diff --git a/src/vendor/github.com/valyala/fasthttp/client.go b/src/vendor/github.com/valyala/fasthttp/client.go index 84a7f93ed..7b88a7815 100644 --- a/src/vendor/github.com/valyala/fasthttp/client.go +++ b/src/vendor/github.com/valyala/fasthttp/client.go @@ -3060,9 +3060,8 @@ func (t *transport) RoundTrip(hc *HostClient, req *Request, resp *Response) (ret return nil }) return false, nil - } else { - hc.releaseReader(br) } + hc.releaseReader(br) if closeConn { hc.closeConn(cc) diff --git a/src/vendor/github.com/valyala/fasthttp/cookie.go b/src/vendor/github.com/valyala/fasthttp/cookie.go index 3b1fe6b98..f4ea24c4c 100644 --- a/src/vendor/github.com/valyala/fasthttp/cookie.go +++ b/src/vendor/github.com/valyala/fasthttp/cookie.go @@ -77,6 +77,9 @@ type Cookie struct { bufK []byte bufV []byte + // maxAge=0 means no 'max-age' attribute specified. + // maxAge<0 means delete cookie now, equivalently 'max-age=0' + // maxAge>0 means 'max-age' attribute present and given in seconds maxAge int sameSite CookieSameSite @@ -193,7 +196,10 @@ func (c *Cookie) MaxAge() int { // SetMaxAge sets cookie expiration time based on seconds. This takes precedence // over any absolute expiry set on the cookie. // -// Set max age to 0 to unset. +// 'max-age' is set when the maxAge is non-zero. That is, if maxAge = 0, +// the 'max-age' is unset. If maxAge < 0, it indicates that the cookie should +// be deleted immediately, equivalent to 'max-age=0'. 
This behavior is +// consistent with the Go standard library's net/http package. func (c *Cookie) SetMaxAge(seconds int) { c.maxAge = seconds } @@ -278,11 +284,16 @@ func (c *Cookie) AppendBytes(dst []byte) []byte { } dst = append(dst, c.value...) - if c.maxAge > 0 { + if c.maxAge != 0 { dst = append(dst, ';', ' ') dst = append(dst, strCookieMaxAge...) dst = append(dst, '=') - dst = AppendUint(dst, c.maxAge) + if c.maxAge < 0 { + // See https://github.com/valyala/fasthttp/issues/1900 + dst = AppendUint(dst, 0) + } else { + dst = AppendUint(dst, c.maxAge) + } } else if !c.expire.IsZero() { c.bufV = AppendHTTPDate(c.bufV[:0], c.expire) dst = append(dst, ';', ' ') diff --git a/src/vendor/github.com/valyala/fasthttp/http.go b/src/vendor/github.com/valyala/fasthttp/http.go index 2a9a6cb43..99a8bddf8 100644 --- a/src/vendor/github.com/valyala/fasthttp/http.go +++ b/src/vendor/github.com/valyala/fasthttp/http.go @@ -1590,9 +1590,8 @@ func (req *Request) Write(w *bufio.Writer) error { if len(req.Header.Host()) == 0 { if len(host) == 0 { return errRequestHostRequired - } else { - req.Header.SetHostBytes(host) } + req.Header.SetHostBytes(host) } else if !req.UseHostHeader { req.Header.SetHostBytes(host) } @@ -2219,20 +2218,109 @@ func writeBodyFixedSize(w *bufio.Writer, r io.Reader, size int64) error { return err } +// copyZeroAlloc optimizes io.Copy by calling ReadFrom or WriteTo only when +// copying between os.File and net.TCPConn. If the reader has a WriteTo +// method, it uses WriteTo for copying; if the writer has a ReadFrom method, +// it uses ReadFrom for copying. If neither method is available, it gets a +// buffer from sync.Pool to perform the copy. +// +// io.CopyBuffer always uses the WriterTo or ReadFrom interface if it's +// available. however, os.File and net.TCPConn unfortunately have a +// fallback in their WriterTo that calls io.Copy if sendfile isn't possible. +// +// See issue: https://github.com/valyala/fasthttp/issues/1889 +// +// sendfile can only be triggered when copying between os.File and net.TCPConn. +// Since the function confirming zero-copy is a private function, we use +// ReadFrom only in this specific scenario. For all other cases, we prioritize +// using our own copyBuffer method. +// +// o: our copyBuffer +// r: readFrom +// w: writeTo +// +// write\read *File *TCPConn writeTo other +// *File o r w o +// *TCPConn w,r o w o +// readFrom r r w r +// other o o w o +// +//nolint:dupword func copyZeroAlloc(w io.Writer, r io.Reader) (int64, error) { - if wt, ok := r.(io.WriterTo); ok { - return wt.WriteTo(w) - } - if rt, ok := w.(io.ReaderFrom); ok { - return rt.ReadFrom(r) + var readerIsFile, readerIsConn bool + + switch r := r.(type) { + case *os.File: + readerIsFile = true + case *net.TCPConn: + readerIsConn = true + case io.WriterTo: + return r.WriteTo(w) + } + + switch w := w.(type) { + case *os.File: + if readerIsConn { + return w.ReadFrom(r) + } + case *net.TCPConn: + if readerIsFile { + // net.WriteTo requires go1.22 or later + // Benchmark tests show that on Windows, WriteTo performs + // significantly better than ReadFrom. On Linux, however, + // ReadFrom slightly outperforms WriteTo. When possible, + // copyZeroAlloc aims to perform better than or as well + // as io.Copy, so we use WriteTo whenever possible for + // optimal performance. 
+ if rt, ok := r.(io.WriterTo); ok { + return rt.WriteTo(w) + } + return w.ReadFrom(r) + } + case io.ReaderFrom: + return w.ReadFrom(r) } + vbuf := copyBufPool.Get() buf := vbuf.([]byte) - n, err := io.CopyBuffer(w, r, buf) + n, err := copyBuffer(w, r, buf) copyBufPool.Put(vbuf) return n, err } +// copyBuffer is rewritten from io.copyBuffer. We do not check if src has a +// WriteTo method, if dst has a ReadFrom method, or if buf is empty. +func copyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) { + for { + nr, er := src.Read(buf) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw < 0 || nr < nw { + nw = 0 + if ew == nil { + ew = errors.New("invalid write result") + } + } + written += int64(nw) + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er != nil { + if er != io.EOF { + err = er + } + break + } + } + return written, err +} + var copyBufPool = sync.Pool{ New: func() any { return make([]byte, 4096) @@ -2417,17 +2505,24 @@ func parseChunkSize(r *bufio.Reader) (int, error) { c, err := r.ReadByte() if err != nil { return -1, ErrBrokenChunk{ - error: fmt.Errorf("cannot read '\r' char at the end of chunk size: %w", err), + error: fmt.Errorf("cannot read '\\r' char at the end of chunk size: %w", err), } } // Skip chunk extension after chunk size. // Add support later if anyone needs it. if c != '\r' { + // Security: Don't allow newlines in chunk extensions. + // This can lead to request smuggling issues with some reverse proxies. + if c == '\n' { + return -1, ErrBrokenChunk{ + error: errors.New("invalid character '\\n' after chunk size"), + } + } continue } if err := r.UnreadByte(); err != nil { return -1, ErrBrokenChunk{ - error: fmt.Errorf("cannot unread '\r' char at the end of chunk size: %w", err), + error: fmt.Errorf("cannot unread '\\r' char at the end of chunk size: %w", err), } } break diff --git a/src/vendor/github.com/valyala/fasthttp/server.go b/src/vendor/github.com/valyala/fasthttp/server.go index bcf30cb9e..4e4412245 100644 --- a/src/vendor/github.com/valyala/fasthttp/server.go +++ b/src/vendor/github.com/valyala/fasthttp/server.go @@ -1887,6 +1887,8 @@ func (s *Server) Shutdown() error { // // ShutdownWithContext does not close keepalive connections so it's recommended to set ReadTimeout and IdleTimeout // to something else than 0. +// +// When ShutdownWithContext returns errors, any operation to the Server is unavailable. func (s *Server) ShutdownWithContext(ctx context.Context) (err error) { s.mu.Lock() defer s.mu.Unlock() @@ -1898,11 +1900,7 @@ func (s *Server) ShutdownWithContext(ctx context.Context) (err error) { return nil } - for _, ln := range s.ln { - if err = ln.Close(); err != nil { - return err - } - } + lnerr := s.closeListenersLocked() if s.done != nil { close(s.done) @@ -1913,28 +1911,25 @@ func (s *Server) ShutdownWithContext(ctx context.Context) (err error) { // Now we just have to wait until all workers are done or timeout. ticker := time.NewTicker(time.Millisecond * 100) defer ticker.Stop() -END: + for { s.closeIdleConns() if open := atomic.LoadInt32(&s.open); open == 0 { - break + // There may be a pending request to call ctx.Done(). Therefore, we only set it to nil when open == 0. + s.done = nil + return lnerr } // This is not an optimal solution but using a sync.WaitGroup // here causes data races as it's hard to prevent Add() to be called // while Wait() is waiting. 
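The parseChunkSize hunk above hardens chunk-extension parsing against request smuggling. The following is a standalone, simplified sketch of the same check; it is not fasthttp's internal parseChunkSize, and the helper names are illustrative:

package main

import (
	"bufio"
	"errors"
	"fmt"
	"strings"
)

// parseChunkSizeLine reads a hex chunk size and skips any chunk extension,
// but rejects a bare '\n' before the terminating CRLF. Lenient parsers that
// accept a lone LF here can disagree with stricter proxies about where the
// chunk header ends, which is what enables request smuggling.
func parseChunkSizeLine(r *bufio.Reader) (int, error) {
	size, digits := 0, 0
	for {
		c, err := r.ReadByte()
		if err != nil {
			return -1, err
		}
		if d := hexDigit(c); d >= 0 {
			size = size<<4 | d
			digits++
			continue
		}
		if digits == 0 {
			return -1, errors.New("chunk size must start with a hex digit")
		}
		// Skip the chunk extension up to CR, rejecting LF outright.
		for c != '\r' {
			if c == '\n' {
				return -1, errors.New("invalid character '\\n' after chunk size")
			}
			if c, err = r.ReadByte(); err != nil {
				return -1, err
			}
		}
		if lf, err := r.ReadByte(); err != nil || lf != '\n' {
			return -1, errors.New("chunk size line must end with CRLF")
		}
		return size, nil
	}
}

func hexDigit(c byte) int {
	switch {
	case c >= '0' && c <= '9':
		return int(c - '0')
	case c >= 'a' && c <= 'f':
		return int(c-'a') + 10
	case c >= 'A' && c <= 'F':
		return int(c-'A') + 10
	}
	return -1
}

func main() {
	good := bufio.NewReader(strings.NewReader("1a;name=value\r\n"))
	bad := bufio.NewReader(strings.NewReader("1a;name=value\nGET /smuggled HTTP/1.1\r\n"))
	fmt.Println(parseChunkSizeLine(good)) // 26 <nil>
	fmt.Println(parseChunkSizeLine(bad))  // -1 invalid character '\n' after chunk size
}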
select { case <-ctx.Done(): - err = ctx.Err() - break END + return ctx.Err() case <-ticker.C: continue } } - - s.done = nil - s.ln = nil - return err } func acceptConn(s *Server, ln net.Listener, lastPerIPErrorTime *time.Time) (net.Conn, error) { @@ -2749,15 +2744,7 @@ func (ctx *RequestCtx) Deadline() (deadline time.Time, ok bool) { // Note: Because creating a new channel for every request is just too expensive, so // RequestCtx.s.done is only closed when the server is shutting down. func (ctx *RequestCtx) Done() <-chan struct{} { - // fix use new variables to prevent panic caused by modifying the original done chan to nil. - done := ctx.s.done - - if done == nil { - done = make(chan struct{}, 1) - done <- struct{}{} - return done - } - return done + return ctx.s.done } // Err returns a non-nil error value after Done is closed, @@ -2934,6 +2921,17 @@ func (s *Server) closeIdleConns() { s.idleConnsMu.Unlock() } +func (s *Server) closeListenersLocked() error { + var err error + for _, ln := range s.ln { + if cerr := ln.Close(); cerr != nil && err == nil { + err = cerr + } + } + s.ln = nil + return err +} + // A ConnState represents the state of a client connection to a server. // It's used by the optional Server.ConnState hook. type ConnState int diff --git a/src/vendor/github.com/valyala/fasthttp/uri.go b/src/vendor/github.com/valyala/fasthttp/uri.go index cca09517a..11f7b03bd 100644 --- a/src/vendor/github.com/valyala/fasthttp/uri.go +++ b/src/vendor/github.com/valyala/fasthttp/uri.go @@ -312,11 +312,11 @@ func (u *URI) parse(host, uri []byte, isTLS bool) error { } u.host = append(u.host, host...) - if parsedHost, err := parseHost(u.host); err != nil { + parsedHost, err := parseHost(u.host) + if err != nil { return err - } else { - u.host = parsedHost } + u.host = parsedHost lowercaseBytes(u.host) b := uri diff --git a/src/vendor/golang.org/x/crypto/ssh/server.go b/src/vendor/golang.org/x/crypto/ssh/server.go index c0d1c29e6..5b5ccd96f 100644 --- a/src/vendor/golang.org/x/crypto/ssh/server.go +++ b/src/vendor/golang.org/x/crypto/ssh/server.go @@ -149,7 +149,7 @@ func (s *ServerConfig) AddHostKey(key Signer) { } // cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. +// acceptable for a user. This is a FIFO cache. type cachedPubKey struct { user string pubKeyData []byte @@ -157,7 +157,13 @@ type cachedPubKey struct { perms *Permissions } -const maxCachedPubKeys = 16 +// maxCachedPubKeys is the number of cache entries we store. +// +// Due to consistent misuse of the PublicKeyCallback API, we have reduced this +// to 1, such that the only key in the cache is the most recently seen one. This +// forces the behavior that the last call to PublicKeyCallback will always be +// with the key that is used for authentication. +const maxCachedPubKeys = 1 // pubKeyCache caches tests for public keys. Since SSH clients // will query whether a public key is acceptable before attempting to @@ -179,9 +185,10 @@ func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { // add adds the given tuple to the cache. 
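Before the crypto/ssh hunk continues: the fasthttp server.go shutdown rework above can be exercised as in the sketch below. The handler, address, and timeouts are placeholders; per the new doc comment, the Server should not be reused once ShutdownWithContext returns an error.

package main

import (
	"context"
	"log"
	"time"

	"github.com/valyala/fasthttp"
)

func main() {
	srv := &fasthttp.Server{
		Handler: func(ctx *fasthttp.RequestCtx) {
			ctx.WriteString("hello")
		},
		// Keep-alive connections are not force-closed by shutdown, so bound them.
		ReadTimeout: 5 * time.Second,
		IdleTimeout: 30 * time.Second,
	}

	go func() {
		if err := srv.ListenAndServe(":8080"); err != nil {
			log.Printf("server stopped: %v", err)
		}
	}()

	time.Sleep(2 * time.Second) // stand-in for "run until a signal arrives"

	// Give in-flight requests up to 10s to finish. With this change, listener
	// close errors are collected instead of aborting the shutdown early.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := srv.ShutdownWithContext(ctx); err != nil {
		log.Printf("shutdown: %v", err)
	}
}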
func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) + if len(c.keys) >= maxCachedPubKeys { + c.keys = c.keys[1:] } + c.keys = append(c.keys, candidate) } // ServerConn is an authenticated SSH connection, as seen from the diff --git a/src/vendor/golang.org/x/net/html/doctype.go b/src/vendor/golang.org/x/net/html/doctype.go index c484e5a94..bca3ae9a0 100644 --- a/src/vendor/golang.org/x/net/html/doctype.go +++ b/src/vendor/golang.org/x/net/html/doctype.go @@ -87,7 +87,7 @@ func parseDoctype(s string) (n *Node, quirks bool) { } } if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + strings.EqualFold(lastAttr.Val, "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd") { quirks = true } } diff --git a/src/vendor/golang.org/x/net/html/foreign.go b/src/vendor/golang.org/x/net/html/foreign.go index 9da9e9dc4..e8515d8e8 100644 --- a/src/vendor/golang.org/x/net/html/foreign.go +++ b/src/vendor/golang.org/x/net/html/foreign.go @@ -40,8 +40,7 @@ func htmlIntegrationPoint(n *Node) bool { if n.Data == "annotation-xml" { for _, a := range n.Attr { if a.Key == "encoding" { - val := strings.ToLower(a.Val) - if val == "text/html" || val == "application/xhtml+xml" { + if strings.EqualFold(a.Val, "text/html") || strings.EqualFold(a.Val, "application/xhtml+xml") { return true } } diff --git a/src/vendor/golang.org/x/net/html/parse.go b/src/vendor/golang.org/x/net/html/parse.go index 46a89eda6..643c674e3 100644 --- a/src/vendor/golang.org/x/net/html/parse.go +++ b/src/vendor/golang.org/x/net/html/parse.go @@ -840,6 +840,10 @@ func afterHeadIM(p *parser) bool { p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) p.framesetOK = true + if p.tok.Type == ErrorToken { + // Stop parsing. + return true + } return false } @@ -1031,7 +1035,7 @@ func inBodyIM(p *parser) bool { if p.tok.DataAtom == a.Input { for _, t := range p.tok.Attr { if t.Key == "type" { - if strings.ToLower(t.Val) == "hidden" { + if strings.EqualFold(t.Val, "hidden") { // Skip setting framesetOK = false return true } @@ -1459,7 +1463,7 @@ func inTableIM(p *parser) bool { return inHeadIM(p) case a.Input: for _, t := range p.tok.Attr { - if t.Key == "type" && strings.ToLower(t.Val) == "hidden" { + if t.Key == "type" && strings.EqualFold(t.Val, "hidden") { p.addElement() p.oe.pop() return true diff --git a/src/vendor/golang.org/x/net/http2/frame.go b/src/vendor/golang.org/x/net/http2/frame.go index 105c3b279..81faec7e7 100644 --- a/src/vendor/golang.org/x/net/http2/frame.go +++ b/src/vendor/golang.org/x/net/http2/frame.go @@ -1490,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. + // This would be a bad algorithm, but N is 5. // And this doesn't allocate. 
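On the golang.org/x/crypto/ssh change above: shrinking maxCachedPubKeys to 1 means the last PublicKeyCallback invocation is always for the key that actually authenticates, but callers should still not infer the key from callback order. The usual pattern is to record the accepted key in the returned Permissions, roughly as in this sketch (map contents and extension name are illustrative):

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// authorizedKeys maps a public-key fingerprint to the user allowed to use it.
// How it is populated is out of scope for this sketch.
var authorizedKeys = map[string]string{}

func newServerConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
			fp := ssh.FingerprintSHA256(key)
			if user, ok := authorizedKeys[fp]; ok && user == conn.User() {
				// Record the accepted key in Permissions. After the handshake it
				// is available as ServerConn.Permissions.Extensions["pubkey-fp"],
				// which is the reliable way to learn which key authenticated,
				// independent of how often the callback was invoked.
				return &ssh.Permissions{
					Extensions: map[string]string{"pubkey-fp": fp},
				}, nil
			}
			return nil, fmt.Errorf("unknown public key for user %q", conn.User())
		},
	}
}

func main() {
	cfg := newServerConfig()
	_ = cfg // a real server would also call cfg.AddHostKey and ssh.NewServerConn
}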
for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/src/vendor/golang.org/x/net/http2/http2.go b/src/vendor/golang.org/x/net/http2/http2.go index 7688c356b..c7601c909 100644 --- a/src/vendor/golang.org/x/net/http2/http2.go +++ b/src/vendor/golang.org/x/net/http2/http2.go @@ -34,10 +34,11 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + disableExtendedConnectProtocol bool ) func init() { @@ -50,6 +51,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=0") { + disableExtendedConnectProtocol = true + } } const ( @@ -141,6 +145,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -150,21 +158,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { diff --git a/src/vendor/golang.org/x/net/http2/server.go b/src/vendor/golang.org/x/net/http2/server.go index 832414b45..b55547aec 100644 --- a/src/vendor/golang.org/x/net/http2/server.go +++ b/src/vendor/golang.org/x/net/http2/server.go @@ -932,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, conf.MaxReadFrameSize}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, - {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, - }, + write: settings, }) sc.unackedSettings++ @@ -1801,6 +1805,9 @@ func (sc 
*serverConn) processSetting(s Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2231,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), + protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } isConnect := rp.method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { @@ -2259,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if rp.authority == "" { rp.authority = rp.header.Get("Host") } + if rp.protocol != "" { + rp.header.Set(":protocol", rp.protocol) + } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) if err != nil { @@ -2285,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res type requestParam struct { method string scheme, authority, path string + protocol string header http.Header } @@ -2326,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var url_ *url.URL var requestURI string - if rp.method == "CONNECT" { + if rp.method == "CONNECT" && rp.protocol == "" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { diff --git a/src/vendor/golang.org/x/net/http2/transport.go b/src/vendor/golang.org/x/net/http2/transport.go index f5968f440..090d0e1bd 100644 --- a/src/vendor/golang.org/x/net/http2/transport.go +++ b/src/vendor/golang.org/x/net/http2/transport.go @@ -368,25 +368,26 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + 
cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 @@ -396,6 +397,17 @@ type ClientConn struct { initialStreamRecvWindowSize int32 readIdleTimeout time.Duration pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool // pendingResets is the number of RST_STREAM frames we have sent to the peer, // without confirming that the peer has received them. When we send a RST_STREAM, @@ -819,6 +831,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, + seenSettingsChan: make(chan struct{}), wantSettingsAck: true, readIdleTimeout: conf.SendPingTimeout, pingTimeout: conf.PingTimeout, @@ -1466,6 +1479,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1481,12 +1496,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre return err } + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true + } + // Acquire the new-request lock by writing to reqHeaderMu. // This lock guards the critical section covering allocating a new stream ID // (requires mu) and creating the stream (requires wmu). 
if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1714,10 +1748,14 @@ func (cs *clientStream) cleanupWriteRequest(err error) { ping := false if !closeOnIdle { cc.mu.Lock() - if cc.pendingResets == 0 { - ping = true + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ } - cc.pendingResets++ cc.mu.Unlock() } cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) @@ -2030,7 +2068,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) func validateHeaders(hdrs http.Header) string { for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) } for _, v := range vv { @@ -2046,6 +2084,10 @@ func validateHeaders(hdrs http.Header) string { var errNilRequestURL = errors.New("http2: Request.URI is nil") +func isNormalConnect(req *http.Request) bool { + return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" +} + // requires cc.wmu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -2066,7 +2108,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } var path string - if req.Method != "CONNECT" { + if !isNormalConnect(req) { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path @@ -2103,7 +2145,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail m = http.MethodGet } f(":method", m) - if req.Method != "CONNECT" { + if !isNormalConnect(req) { f(":path", path) f(":scheme", req.URL.Scheme) } @@ -2461,7 +2503,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2507,13 +2549,16 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } + if !cc.seenSettings { + close(cc.seenSettingsChan) + } return err } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. 
So if this @@ -2842,7 +2887,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2977,9 +3022,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. + rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -3073,6 +3131,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -3090,6 +3163,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -3098,7 +3172,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -3127,7 +3201,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3205,6 +3279,7 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if cc.pendingResets > 0 { // See clientStream.cleanupWriteRequest. 
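Taken together, the http2 hunks above add RFC 8441 extended CONNECT: the server advertises SETTINGS_ENABLE_CONNECT_PROTOCOL (unless GODEBUG contains http2xconnect=0), and the client blocks extended CONNECT requests until the first SETTINGS frame arrives. A rough client-side sketch, assuming the ':protocol' pseudo-header is supplied through the request header map as these hunks suggest; the endpoint and protocol value are placeholders:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// Extended CONNECT is typically used to bootstrap WebSockets over HTTP/2.
	req, err := http.NewRequest("CONNECT", "https://example.com/chat", nil)
	if err != nil {
		panic(err)
	}
	// validateHeaders now tolerates this one pseudo-header in the header map.
	req.Header.Set(":protocol", "websocket")

	tr := &http2.Transport{
		TLSClientConfig: &tls.Config{NextProtos: []string{"h2"}},
	}
	resp, err := tr.RoundTrip(req)
	if err != nil {
		// If the server's first SETTINGS frame did not enable the extension,
		// the transport fails fast instead of sending the request.
		fmt.Println("extended CONNECT not available:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("tunnel established:", resp.Status)
}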
cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true cc.cond.Broadcast() } return nil diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux.go index ccba391c9..6ebc48b3f 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -321,6 +321,9 @@ const ( AUDIT_INTEGRITY_STATUS = 0x70a AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 @@ -489,6 +492,7 @@ const ( BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 @@ -1166,6 +1170,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1799,6 +1804,8 @@ const ( LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1924,6 +1931,7 @@ const ( MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2970,6 +2978,7 @@ const ( RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 0c00cb3f3..c0d45e320 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -297,6 +298,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -335,6 +338,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dfb364554..c731d24f0 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -298,6 +299,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -336,6 +339,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + 
SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index d46dcf78a..680018a4a 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -303,6 +304,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -341,6 +344,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 3af3248a7..a63909f30 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -205,6 +206,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -294,6 +296,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -332,6 +336,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 292bcf028..9b0a2573f 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -290,6 +291,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -328,6 +331,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 782b7110f..958e6e064 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 
0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 84973fd92..50c7f25bd 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 6d9cbc3b2..ced21d66d 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 5f9fedbce..226c04419 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go 
b/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index bb0026ee0..3122737cd 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -351,6 +352,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -389,6 +392,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 46120db5c..eb5d3467e 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -355,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -393,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 5c951634f..e921ebc60 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -355,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -393,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 11a84d5af..38ba81c55 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -287,6 +288,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -325,6 +328,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE 
= 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f78c4617c..71f040097 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -359,6 +360,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -397,6 +400,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/src/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/src/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index aeb777c34..c44a31332 100644 --- a/src/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/src/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -350,6 +351,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c @@ -436,6 +439,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/src/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index d003c3d43..17c53bd9b 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr 
struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/src/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 0d45a941a..2392226a7 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux.go index 8daaf3faf..5537148dc 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2594,8 +2594,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + SOF_TIMESTAMPING_LAST = 0x20000 + SOF_TIMESTAMPING_MASK = 0x3ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3541,7 +3541,7 @@ type Nhmsg struct { type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } @@ -3802,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2c + ETHTOOL_MSG_USER_MAX = 0x2d ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3842,7 +3842,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2c + ETHTOOL_MSG_KERNEL_MAX = 0x2e ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3850,7 +3850,7 @@ const ( ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME 
= 0x2 @@ -4031,11 +4031,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4200,7 +4200,8 @@ type ( } PtpSysOffsetExtended struct { Samples uint32 - Rsv [3]uint32 + Clockid int32 + Rsv [2]uint32 Ts [25][3]PtpClockTime } PtpSysOffsetPrecise struct { @@ -4399,6 +4400,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct { diff --git a/src/vendor/golang.org/x/sys/windows/syscall_windows.go b/src/vendor/golang.org/x/sys/windows/syscall_windows.go index 4510bfc3f..4a3254386 100644 --- a/src/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/src/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -168,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState diff --git a/src/vendor/golang.org/x/sys/windows/types_windows.go b/src/vendor/golang.org/x/sys/windows/types_windows.go index 51311e205..9d138de5f 100644 --- a/src/vendor/golang.org/x/sys/windows/types_windows.go +++ b/src/vendor/golang.org/x/sys/windows/types_windows.go @@ -176,6 +176,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. 
+ PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 diff --git a/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 6f5252880..01c0716c2 100644 --- a/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -280,8 +280,10 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -1612,7 +1614,7 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { - r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1652,7 +1654,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { - r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1660,7 +1662,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { } func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { - r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1672,7 +1674,7 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa if initialNotification { _p0 = 1 } - r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1684,7 +1686,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext if initialNotification { _p0 = 1 } - r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -2446,6 +2448,14 @@ func 
GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er return } +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { @@ -2462,6 +2472,14 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 return } +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { var _p0 uint32 if wait { diff --git a/src/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/src/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index f3ab0a2e1..65fe2628e 100644 --- a/src/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/src/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -106,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) { // additional trailing data beyond the end of the export data. func NewReader(r io.Reader) (io.Reader, error) { buf := bufio.NewReader(r) - _, size, err := gcimporter.FindExportData(buf) + size, err := gcimporter.FindExportData(buf) if err != nil { return nil, err } - if size >= 0 { - // We were given an archive and found the __.PKGDEF in it. - // This tells us the size of the export data, and we don't - // need to return the entire file. - return &io.LimitedReader{ - R: buf, - N: size, - }, nil - } else { - // We were given an object file. As such, we don't know how large - // the export data is and must return the entire file. - return buf, nil - } + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil } // readAll works the same way as io.ReadAll, but avoids allocations and copies diff --git a/src/vendor/golang.org/x/tools/go/packages/external.go b/src/vendor/golang.org/x/tools/go/packages/external.go index 96db9daf3..91bd62e83 100644 --- a/src/vendor/golang.org/x/tools/go/packages/external.go +++ b/src/vendor/golang.org/x/tools/go/packages/external.go @@ -13,6 +13,7 @@ import ( "fmt" "os" "os/exec" + "slices" "strings" ) @@ -131,7 +132,7 @@ func findExternalDriver(cfg *Config) driver { // command. 
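On the x/sys/windows named-pipe additions above: the two new wrappers can be used as in this sketch (obtaining the pipe handle is out of scope, so the handle below is a placeholder):

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// pipePeers returns the process IDs on both ends of a named pipe, using the
// newly exposed GetNamedPipeClientProcessId / GetNamedPipeServerProcessId.
func pipePeers(pipe windows.Handle) (client, server uint32, err error) {
	if err = windows.GetNamedPipeClientProcessId(pipe, &client); err != nil {
		return 0, 0, err
	}
	if err = windows.GetNamedPipeServerProcessId(pipe, &server); err != nil {
		return 0, 0, err
	}
	return client, server, nil
}

func main() {
	// Placeholder handle; a real program would get this from CreateNamedPipe
	// or from CreateFile on a pipe path.
	var pipe windows.Handle
	if c, s, err := pipePeers(pipe); err == nil {
		fmt.Printf("client pid=%d server pid=%d\n", c, s)
	}
}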
// // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) - cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir) + cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir) cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr @@ -150,7 +151,3 @@ func findExternalDriver(cfg *Config) driver { return &response, nil } } - -// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)]. -// TODO(adonovan): use go1.21 slices.Clip. -func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } diff --git a/src/vendor/golang.org/x/tools/go/packages/golist.go b/src/vendor/golang.org/x/tools/go/packages/golist.go index 76f910ece..870271ed5 100644 --- a/src/vendor/golang.org/x/tools/go/packages/golist.go +++ b/src/vendor/golang.org/x/tools/go/packages/golist.go @@ -505,13 +505,14 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse pkg := &Package{ Name: p.Name, ID: p.ImportPath, + Dir: p.Dir, GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), EmbedFiles: absJoin(p.Dir, p.EmbedFiles), EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), - forTest: p.ForTest, + ForTest: p.ForTest, depsErrors: p.DepsErrors, Module: p.Module, } @@ -795,7 +796,7 @@ func jsonFlag(cfg *Config, goVersion int) string { // Request Dir in the unlikely case Export is not absolute. addFields("Dir", "Export") } - if cfg.Mode&needInternalForTest != 0 { + if cfg.Mode&NeedForTest != 0 { addFields("ForTest") } if cfg.Mode&needInternalDepsErrors != 0 { diff --git a/src/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/src/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 5fcad6ea6..969da4c26 100644 --- a/src/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/src/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -23,6 +23,7 @@ var modes = [...]struct { {NeedSyntax, "NeedSyntax"}, {NeedTypesInfo, "NeedTypesInfo"}, {NeedTypesSizes, "NeedTypesSizes"}, + {NeedForTest, "NeedForTest"}, {NeedModule, "NeedModule"}, {NeedEmbedFiles, "NeedEmbedFiles"}, {NeedEmbedPatterns, "NeedEmbedPatterns"}, diff --git a/src/vendor/golang.org/x/tools/go/packages/packages.go b/src/vendor/golang.org/x/tools/go/packages/packages.go index 2ecc64238..9dedf9777 100644 --- a/src/vendor/golang.org/x/tools/go/packages/packages.go +++ b/src/vendor/golang.org/x/tools/go/packages/packages.go @@ -43,6 +43,20 @@ import ( // ID and Errors (if present) will always be filled. // [Load] may return more information than requested. // +// The Mode flag is a union of several bits named NeedName, +// NeedFiles, and so on, each of which determines whether +// a given field of Package (Name, Files, etc) should be +// populated. +// +// For convenience, we provide named constants for the most +// common combinations of Need flags: +// +// [LoadFiles] lists of files in each package +// [LoadImports] ... plus imports +// [LoadTypes] ... plus type information +// [LoadSyntax] ... plus type-annotated syntax +// [LoadAllSyntax] ... for all dependencies +// // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: // - https://github.com/golang/go/issues/56633 @@ -55,7 +69,7 @@ const ( // NeedName adds Name and PkgPath. 
NeedName LoadMode = 1 << iota - // NeedFiles adds GoFiles, OtherFiles, and IgnoredFiles + // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. @@ -86,9 +100,10 @@ const ( // needInternalDepsErrors adds the internal deps errors field for use by gopls. needInternalDepsErrors - // needInternalForTest adds the internal forTest field. + // NeedForTest adds ForTest. + // // Tests must also be set on the context for this field to be populated. - needInternalForTest + NeedForTest // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. @@ -108,33 +123,18 @@ const ( const ( // LoadFiles loads the name and file names for the initial packages. - // - // Deprecated: LoadFiles exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles // LoadImports loads the name, file names, and import mapping for the initial packages. - // - // Deprecated: LoadImports exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. LoadImports = LoadFiles | NeedImports // LoadTypes loads exported type information for the initial packages. - // - // Deprecated: LoadTypes exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes // LoadSyntax loads typed syntax for the initial packages. - // - // Deprecated: LoadSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. - // - // Deprecated: LoadAllSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. @@ -434,6 +434,12 @@ type Package struct { // PkgPath is the package path as used by the go/types package. PkgPath string + // Dir is the directory associated with the package, if it exists. + // + // For packages listed by the go command, this is the directory containing + // the package files. + Dir string + // Errors contains any errors encountered querying the metadata // of the package, or while parsing or type-checking its files. Errors []Error @@ -521,8 +527,8 @@ type Package struct { // -- internal -- - // forTest is the package under test, if any. - forTest string + // ForTest is the package under test, if any. + ForTest string // depsErrors is the DepsErrors field from the go list response, if any. depsErrors []*packagesinternal.PackageError @@ -551,9 +557,6 @@ type ModuleError struct { } func init() { - packagesinternal.GetForTest = func(p interface{}) string { - return p.(*Package).forTest - } packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } @@ -565,7 +568,6 @@ func init() { } packagesinternal.TypecheckCgo = int(typecheckCgo) packagesinternal.DepsErrors = int(needInternalDepsErrors) - packagesinternal.ForTest = int(needInternalForTest) } // An Error describes a problem with a package's metadata, syntax, or types. 
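For reference, a small editorial sketch (not part of the patch) of the go/packages knobs this vendor bump exposes: the now-exported NeedForTest mode bit and the new Package.Dir field. The "./..." pattern is only an example.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// NeedForTest populates Package.ForTest (Tests must also be true);
		// NeedFiles now additionally fills in Package.Dir.
		Mode:  packages.NeedName | packages.NeedFiles | packages.NeedForTest,
		Tests: true,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Printf("%s dir=%s forTest=%q\n", p.PkgPath, p.Dir, p.ForTest)
	}
}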
diff --git a/src/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/src/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index f6437feb1..6f5d8a213 100644 --- a/src/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/src/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -39,12 +39,15 @@ func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { } // FindExportData positions the reader r at the beginning of the -// export data section of an underlying GC-created object/archive +// export data section of an underlying cmd/compile created archive // file by reading from it. The reader must be positioned at the -// start of the file before calling this function. The hdr result -// is the string before the export data, either "$$" or "$$B". -// The size result is the length of the export data in bytes, or -1 if not known. -func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { +// start of the file before calling this function. +// The size result is the length of the export data in bytes. +// +// This function is needed by [gcexportdata.Read], which must +// accept inputs produced by the last two releases of cmd/compile, +// plus tip. +func FindExportData(r *bufio.Reader) (size int64, err error) { // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -52,27 +55,32 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { return } - if string(line) == "!\n" { - // Archive file. Scan to __.PKGDEF. - var name string - if name, size, err = readGopackHeader(r); err != nil { - return - } + // Is the first line an archive file signature? + if string(line) != "!\n" { + err = fmt.Errorf("not the start of an archive file (%q)", line) + return + } - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") - return - } + // Archive file. Scan to __.PKGDEF. + var name string + if name, size, err = readGopackHeader(r); err != nil { + return + } + arsize := size - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return } + size -= int64(len(line)) // Now at __.PKGDEF in archive or still at beginning of file. // Either way, line should begin with "go object ". @@ -81,8 +89,8 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { return } - // Skip over object header to export data. - // Begins after first line starting with $$. + // Skip over object headers to get to the export data section header "$$B\n". + // Object headers are lines that do not start with '$'. for line[0] != '$' { if line, err = r.ReadSlice('\n'); err != nil { err = fmt.Errorf("can't find export data (%v)", err) @@ -90,9 +98,18 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { } size -= int64(len(line)) } - hdr = string(line) + + // Check for the binary export data section header "$$B\n". 
+ hdr := string(line) + if hdr != "$$B\n" { + err = fmt.Errorf("unknown export data header: %q", hdr) + return + } + // TODO(taking): Remove end-of-section marker "\n$$\n" from size. + if size < 0 { - size = -1 + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) + return } return diff --git a/src/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/src/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index e6c5d51f8..dbbca8604 100644 --- a/src/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/src/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -161,6 +161,8 @@ func FindPkg(path, srcDir string) (filename, id string) { // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. +// +// TODO(taking): Import is only used in tests. Move to gcimporter_test. func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser var filename, id string @@ -210,58 +212,50 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } defer rc.Close() - var hdr string var size int64 buf := bufio.NewReader(rc) - if hdr, size, err = FindExportData(buf); err != nil { + if size, err = FindExportData(buf); err != nil { return } - switch hdr { - case "$$B\n": - var data []byte - data, err = io.ReadAll(buf) - if err != nil { - break - } + var data []byte + data, err = io.ReadAll(buf) + if err != nil { + return + } + if len(data) == 0 { + return nil, fmt.Errorf("no data to load a package from for path %s", id) + } - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. - if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': - // binary: emitted by cmd/compile till go1.10; obsolete. - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': - // indexed: emitted by cmd/compile till go1.19; - // now used only for serializing go/types. - // See https://github.com/golang/go/issues/69491. - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'u': - // unified: emitted by cmd/compile since go1.20. - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } - } + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // Select appropriate importer. + switch data[0] { + case 'v', 'c', 'd': + // binary: emitted by cmd/compile till go1.10; obsolete. + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': + // indexed: emitted by cmd/compile till go1.19; + // now used only for serializing go/types. + // See https://github.com/golang/go/issues/69491. + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'u': + // unified: emitted by cmd/compile since go1.20. 
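For context, an editorial sketch (not from the patch) of the public gcexportdata flow that sits on top of the rewritten FindExportData: export data is now read only from cmd/compile archive files, and NewReader limits the reader to the __.PKGDEF export section. "fmt" is used purely as an example import path; Find may return nothing under module builds, which the sketch handles.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader locates __.PKGDEF inside the archive and limits the reader
	// to the export data section, per the rewritten FindExportData above.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := map[string]*types.Package{}
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded package:", pkg.Path())
}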
+ _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err default: - err = fmt.Errorf("unknown export data header: %q", hdr) + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) } - - return } type byPath []*types.Package diff --git a/src/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/src/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 44719de17..66e69b438 100644 --- a/src/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/src/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,7 +5,6 @@ // Package packagesinternal exposes internal-only fields from go/packages. package packagesinternal -var GetForTest = func(p interface{}) string { return "" } var GetDepsErrors = func(p interface{}) []*PackageError { return nil } type PackageError struct { @@ -16,7 +15,6 @@ type PackageError struct { var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors -var ForTest int // must be set as a LoadMode to call GetForTest var SetModFlag = func(config interface{}, value string) {} var SetModFile = func(config interface{}, value string) {} diff --git a/src/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/src/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go new file mode 100644 index 000000000..106698064 --- /dev/null +++ b/src/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -0,0 +1,282 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "strconv" + "strings" +) + +// ZeroString returns the string representation of the "zero" value of the type t. +// This string can be used on the right-hand side of an assignment where the +// left-hand side has that explicit type. +// Exception: This does not apply to tuples. Their string representation is +// informational only and cannot be used in an assignment. +// When assigning to a wider type (such as 'any'), it's the caller's +// responsibility to handle any necessary type conversions. +// See [ZeroExpr] for a variant that returns an [ast.Expr]. +func ZeroString(t types.Type, qf types.Qualifier) string { + switch t := t.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return "false" + case t.Info()&types.IsNumeric != 0: + return "0" + case t.Info()&types.IsString != 0: + return `""` + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return "nil" + default: + panic(fmt.Sprint("ZeroString for unexpected type:", t)) + } + + case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: + return "nil" + + case *types.Named, *types.Alias: + switch under := t.Underlying().(type) { + case *types.Struct, *types.Array: + return types.TypeString(t, qf) + "{}" + default: + return ZeroString(under, qf) + } + + case *types.Array, *types.Struct: + return types.TypeString(t, qf) + "{}" + + case *types.TypeParam: + // Assumes func new is not shadowed. + return "*new(" + types.TypeString(t, qf) + ")" + + case *types.Tuple: + // Tuples are not normal values. + // We are currently format as "(t[0], ..., t[n])". Could be something else. 
+ components := make([]string, t.Len()) + for i := 0; i < t.Len(); i++ { + components[i] = ZeroString(t.At(i).Type(), qf) + } + return "(" + strings.Join(components, ", ") + ")" + + case *types.Union: + // Variables of these types cannot be created, so it makes + // no sense to ask for their zero value. + panic(fmt.Sprintf("invalid type for a variable: %v", t)) + + default: + panic(t) // unreachable. + } +} + +// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t. +// ZeroExpr is defined for types that are suitable for variables. +// It may panic for other types such as Tuple or Union. +// See [ZeroString] for a variant that returns a string. +func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + switch t := typ.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return &ast.Ident{Name: "false"} + case t.Info()&types.IsNumeric != 0: + return &ast.BasicLit{Kind: token.INT, Value: "0"} + case t.Info()&types.IsString != 0: + return &ast.BasicLit{Kind: token.STRING, Value: `""`} + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return ast.NewIdent("nil") + default: + panic(fmt.Sprint("ZeroExpr for unexpected type:", t)) + } + + case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: + return ast.NewIdent("nil") + + case *types.Named, *types.Alias: + switch under := t.Underlying().(type) { + case *types.Struct, *types.Array: + return &ast.CompositeLit{ + Type: TypeExpr(f, pkg, typ), + } + default: + return ZeroExpr(f, pkg, under) + } + + case *types.Array, *types.Struct: + return &ast.CompositeLit{ + Type: TypeExpr(f, pkg, typ), + } + + case *types.TypeParam: + return &ast.StarExpr{ // *new(T) + X: &ast.CallExpr{ + // Assumes func new is not shadowed. + Fun: ast.NewIdent("new"), + Args: []ast.Expr{ + ast.NewIdent(t.Obj().Name()), + }, + }, + } + + case *types.Tuple: + // Unlike ZeroString, there is no ast.Expr can express tuple by + // "(t[0], ..., t[n])". + panic(fmt.Sprintf("invalid type for a variable: %v", t)) + + case *types.Union: + // Variables of these types cannot be created, so it makes + // no sense to ask for their zero value. + panic(fmt.Sprintf("invalid type for a variable: %v", t)) + + default: + panic(t) // unreachable. + } +} + +// IsZeroExpr uses simple syntactic heuristics to report whether expr +// is a obvious zero value, such as 0, "", nil, or false. +// It cannot do better without type information. +func IsZeroExpr(expr ast.Expr) bool { + switch e := expr.(type) { + case *ast.BasicLit: + return e.Value == "0" || e.Value == `""` + case *ast.Ident: + return e.Name == "nil" || e.Name == "false" + default: + return false + } +} + +// TypeExpr returns syntax for the specified type. References to named types +// from packages other than pkg are qualified by an appropriate package name, as +// defined by the import environment of file. +// It may panic for types such as Tuple or Union. +func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + switch t := typ.(type) { + case *types.Basic: + switch t.Kind() { + case types.UnsafePointer: + // TODO(hxjiang): replace the implementation with types.Qualifier. 
+ return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} + default: + return ast.NewIdent(t.Name()) + } + + case *types.Pointer: + return &ast.UnaryExpr{ + Op: token.MUL, + X: TypeExpr(f, pkg, t.Elem()), + } + + case *types.Array: + return &ast.ArrayType{ + Len: &ast.BasicLit{ + Kind: token.INT, + Value: fmt.Sprintf("%d", t.Len()), + }, + Elt: TypeExpr(f, pkg, t.Elem()), + } + + case *types.Slice: + return &ast.ArrayType{ + Elt: TypeExpr(f, pkg, t.Elem()), + } + + case *types.Map: + return &ast.MapType{ + Key: TypeExpr(f, pkg, t.Key()), + Value: TypeExpr(f, pkg, t.Elem()), + } + + case *types.Chan: + dir := ast.ChanDir(t.Dir()) + if t.Dir() == types.SendRecv { + dir = ast.SEND | ast.RECV + } + return &ast.ChanType{ + Dir: dir, + Value: TypeExpr(f, pkg, t.Elem()), + } + + case *types.Signature: + var params []*ast.Field + for i := 0; i < t.Params().Len(); i++ { + params = append(params, &ast.Field{ + Type: TypeExpr(f, pkg, t.Params().At(i).Type()), + Names: []*ast.Ident{ + { + Name: t.Params().At(i).Name(), + }, + }, + }) + } + if t.Variadic() { + last := params[len(params)-1] + last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} + } + var returns []*ast.Field + for i := 0; i < t.Results().Len(); i++ { + returns = append(returns, &ast.Field{ + Type: TypeExpr(f, pkg, t.Results().At(i).Type()), + }) + } + return &ast.FuncType{ + Params: &ast.FieldList{ + List: params, + }, + Results: &ast.FieldList{ + List: returns, + }, + } + + case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam} + switch t.Obj().Pkg() { + case pkg, nil: + return ast.NewIdent(t.Obj().Name()) + } + pkgName := t.Obj().Pkg().Name() + + // TODO(hxjiang): replace the implementation with types.Qualifier. + // If the file already imports the package under another name, use that. + for _, cand := range f.Imports { + if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() { + if cand.Name != nil && cand.Name.Name != "" { + pkgName = cand.Name.Name + } + } + } + if pkgName == "." { + return ast.NewIdent(t.Obj().Name()) + } + return &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: ast.NewIdent(t.Obj().Name()), + } + + case *types.Struct: + return ast.NewIdent(t.String()) + + case *types.Interface: + return ast.NewIdent(t.String()) + + case *types.Union: + // TODO(hxjiang): handle the union through syntax (~A | ... | ~Z). + // Remove nil check when calling typesinternal.TypeExpr. + return nil + + case *types.Tuple: + panic("invalid input type types.Tuple") + + default: + panic("unreachable") + } +} diff --git a/src/vendor/golang.org/x/tools/internal/versions/constraint.go b/src/vendor/golang.org/x/tools/internal/versions/constraint.go deleted file mode 100644 index 179063d48..000000000 --- a/src/vendor/golang.org/x/tools/internal/versions/constraint.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -import "go/build/constraint" - -// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+). -// Otherwise nil. -// -// Deprecate once x/tools is after go1.21. 
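As a standalone illustration only (the new ZeroString/ZeroExpr helpers live in an internal package and cannot be imported directly), the zero-value rules they encode look like this; the named type T is constructed on the fly just for the example.

package main

import (
	"fmt"
	"go/types"
)

func main() {
	// A named struct type "T", whose zero value is the composite literal T{}.
	named := types.NewNamed(
		types.NewTypeName(0, nil, "T", nil),
		types.NewStruct(nil, nil),
		nil,
	)
	examples := []struct {
		t    types.Type
		zero string
	}{
		{types.Typ[types.Bool], "false"},
		{types.Typ[types.Int], "0"},
		{types.Typ[types.String], `""`},
		{types.NewPointer(types.Typ[types.Int]), "nil"},
		{named, "T{}"},
	}
	for _, e := range examples {
		fmt.Printf("var x %s = %s\n", e.t, e.zero)
	}
}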
-var ConstraintGoVersion func(x constraint.Expr) string diff --git a/src/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/src/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index e7d3805e3..f388426b0 100644 --- a/src/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/src/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -159,14 +159,14 @@ var file_google_api_httpbody_proto_rawDesc = []byte{ 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, - 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41, + 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/src/vendor/google.golang.org/grpc/CONTRIBUTING.md b/src/vendor/google.golang.org/grpc/CONTRIBUTING.md index 0854d298e..d9bfa6e1e 100644 --- a/src/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/src/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) +If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements @@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly. is a great place to start. These issues are well-documented and usually can be resolved with a single pull request. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number @@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly. proposal](https://github.com/grpc/proposal). - Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a github issue if it exists. 
+ and **why** it was made. Link to a GitHub issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments diff --git a/src/vendor/google.golang.org/grpc/balancer/balancer.go b/src/vendor/google.golang.org/grpc/balancer/balancer.go index b181f386a..382ad6941 100644 --- a/src/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/src/vendor/google.golang.org/grpc/balancer/balancer.go @@ -73,17 +73,6 @@ func unregisterForTesting(name string) { delete(m, name) } -// connectedAddress returns the connected address for a SubConnState. The -// address is only valid if the state is READY. -func connectedAddress(scs SubConnState) resolver.Address { - return scs.connectedAddress -} - -// setConnectedAddress sets the connected address for a SubConnState. -func setConnectedAddress(scs *SubConnState, addr resolver.Address) { - scs.connectedAddress = addr -} - func init() { internal.BalancerUnregister = unregisterForTesting internal.ConnectedAddress = connectedAddress @@ -106,54 +95,6 @@ func Get(name string) Builder { return nil } -// A SubConn represents a single connection to a gRPC backend service. -// -// Each SubConn contains a list of addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger the -// connecting, Balancers must call Connect. If a connection re-enters IDLE, -// Balancers must call Connect again to trigger a new connection attempt. -// -// gRPC will try to connect to the addresses in sequence, and stop trying the -// remainder once the first connection is successful. If an attempt to connect -// to all addresses encounters an error, the SubConn will enter -// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. -// -// Once established, if a connection is lost, the SubConn will transition -// directly to IDLE. -// -// This interface is to be implemented by gRPC. Users should not need their own -// implementation of this interface. For situations like testing, any -// implementations should embed this interface. This allows gRPC to add new -// methods to this interface. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully closed, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() - // GetOrBuildProducer returns a reference to the existing Producer for this - // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. 
Returns a close function which must - // be called when the Producer is no longer needed. - GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() -} - // NewSubConnOptions contains options to create new SubConn. type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created @@ -421,18 +362,6 @@ type ExitIdler interface { ExitIdle() } -// SubConnState describes the state of a SubConn. -type SubConnState struct { - // ConnectivityState is the connectivity state of the SubConn. - ConnectivityState connectivity.State - // ConnectionError is set if the ConnectivityState is TransientFailure, - // describing the reason the SubConn failed. Otherwise, it is nil. - ConnectionError error - // connectedAddr contains the connected address when ConnectivityState is - // Ready. Otherwise, it is indeterminate. - connectedAddress resolver.Address -} - // ClientConnState describes the state of a ClientConn relevant to the // balancer. type ClientConnState struct { @@ -445,20 +374,3 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") - -// A ProducerBuilder is a simple constructor for a Producer. It is used by the -// SubConn to create producers when needed. -type ProducerBuilder interface { - // Build creates a Producer. The first parameter is always a - // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Should also return a close function that will be called when all - // references to the Producer have been given up. - Build(grpcClientConnInterface any) (p Producer, close func()) -} - -// A Producer is a type shared among potentially many consumers. It is -// associated with a SubConn, and an implementation will typically contain -// other methods to provide additional functionality, e.g. configuration or -// subscription registration. -type Producer any diff --git a/src/vendor/google.golang.org/grpc/balancer/base/balancer.go b/src/vendor/google.golang.org/grpc/balancer/base/balancer.go index 2b87bd79c..d5ed172ae 100644 --- a/src/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/src/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } // If resolver state contains no addresses, return an error so ClientConn - // will trigger re-resolve. Also records this as an resolver error, so when + // will trigger re-resolve. Also records this as a resolver error, so when // the overall state turns transient failure, the error message will have // the zero address information. 
if len(s.ResolverState.Addresses) == 0 { diff --git a/src/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/src/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go new file mode 100644 index 000000000..7d66cb491 --- /dev/null +++ b/src/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -0,0 +1,35 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the pickfirst package. +package internal + +import ( + rand "math/rand/v2" + "time" +) + +var ( + // RandShuffle pseudo-randomizes the order of addresses. + RandShuffle = rand.Shuffle + // TimeAfterFunc allows mocking the timer for testing connection delay + // related functionality. + TimeAfterFunc = func(d time.Duration, f func()) func() { + timer := time.AfterFunc(d, f) + return func() { timer.Stop() } + } +) diff --git a/src/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/src/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 4d69b4052..ea8899818 100644 --- a/src/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/src/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -23,21 +23,26 @@ import ( "encoding/json" "errors" "fmt" - "math/rand" + rand "math/rand/v2" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + + _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { + if envconfig.NewPickFirstEnabled { + return + } balancer.Register(pickfirstBuilder{}) - internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } } var logger = grpclog.Component("pick-first-lb") @@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +// Shuffler is an interface for shuffling an address list. type Shuffler interface { ShuffleAddressListForTesting(n int, swap func(i, j int)) } +// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n +// is the number of elements. swap swaps the elements with indexes i and j. func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { @@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) 
- internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each diff --git a/src/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/src/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go new file mode 100644 index 000000000..2fc0a71f9 --- /dev/null +++ b/src/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -0,0 +1,911 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirstleaf contains the pick_first load balancing policy which +// will be the universal leaf policy after dualstack changes are implemented. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package pickfirstleaf + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "net/netip" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" + "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + if envconfig.NewPickFirstEnabled { + // Register as the default pick_first balancer. + Name = "pick_first" + } + balancer.Register(pickfirstBuilder{}) +} + +// enableHealthListenerKeyType is a unique key type used in resolver attributes +// to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + // Name is the name of the pick_first_leaf balancer. + // It is changed to "pick_first" in init() if this balancer is to be + // registered as the default pickfirst. + Name = "pick_first_leaf" + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "disconnection", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. 
Number of successful connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) +) + +const ( + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. + ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 +) + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + target: bo.Target.String(), + metricsRecorder: bo.MetricsRecorder, // ClientConn will always create a Metrics Recorder. + + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, + } + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (b pickfirstBuilder) Name() string { + return Name +} + +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of endpoints received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. 
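For context, the shuffleAddressList field parsed into pfConfig above is normally supplied through service config. An editorial sketch (not part of the patch), with a placeholder target and insecure credentials purely for illustration:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Enable the optional gRFC A62 address shuffling on pick_first.
	const svcConfig = `{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}`
	conn, err := grpc.NewClient(
		"dns:///backend.example.com:443", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(svcConfig),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}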
+ effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, + } + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil +} + +type pickfirstBalancer struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. + mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. + state connectivity.State + // scData for active subonns mapped by address. + subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool +} + +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. +func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) + } + + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. + if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } + return + } + + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + b.cancelConnectionTimer() + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. 
+ b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + var newAddrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + newAddrs = append(newAddrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forward the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, can delete this else conditional. + newAddrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + newAddrs = append([]resolver.Address{}, newAddrs...) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + } + + // If an address appears in multiple endpoints or in the same endpoint + // multiple times, we keep it only once. We will create only one SubConn + // for the address because an AddressMap is used to store SubConns. + // Not de-duplicating would result in attempting to connect to the same + // SubConn multiple times in the same pass. We don't want this. + newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) + + prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) + prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready + b.addressList.updateAddrs(newAddrs) + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { + return nil + } + + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. 
+ if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. + b.forceUpdateConcludedStateLocked(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.startFirstPassLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. + b.startFirstPassLocked() + } + return nil +} + +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.cancelConnectionTimer() + b.state = connectivity.Shutdown +} + +// ExitIdle moves the balancer out of idle state. It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle { + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. + for _, sd := range b.subConns.Values() { + sd.(*scData).connectionFailedInFirstPass = false + } + b.requestConnectionLocked() +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.(*scData).subConn.Shutdown() + } + b.subConns = resolver.NewAddressMap() +} + +// deDupAddresses ensures that each address appears only once in the slice. +func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMap() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. +// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. 
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + +// reconcileSubConnsLocked updates the active subchannels based on a new address +// list from the resolver. It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. +// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + newAddrsMap := resolver.NewAddressMap() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + for _, oldAddr := range b.subConns.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.(*scData).subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMap() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. 
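As a standalone sketch of the RFC 8305 section 4 behaviour implemented by the unexported interleaveAddresses above (round-robin across address families so one unreachable family cannot stall a whole pass); the addresses and ports are illustrative:

package main

import (
	"fmt"
	"net"
	"net/netip"
)

// family classifies a "host:port" string as v4, v6, or unknown,
// mirroring the addressFamily helper in the vendored code.
func family(hostport string) string {
	host, _, err := net.SplitHostPort(hostport)
	if err != nil {
		return "unknown"
	}
	ip, err := netip.ParseAddr(host)
	switch {
	case err != nil:
		return "unknown"
	case ip.Is4() || ip.Is4In6():
		return "v4"
	default:
		return "v6"
	}
}

// interleave alternates between address families in first-seen order.
func interleave(addrs []string) []string {
	byFam := map[string][]string{}
	var order []string
	for _, a := range addrs {
		f := family(a)
		if _, ok := byFam[f]; !ok {
			order = append(order, f)
		}
		byFam[f] = append(byFam[f], a)
	}
	out := make([]string, 0, len(addrs))
	for i := 0; len(out) < len(addrs); i = (i + 1) % len(order) {
		f := order[i]
		if rest := byFam[f]; len(rest) > 0 {
			out = append(out, rest[0])
			byFam[f] = rest[1:]
		}
	}
	return out
}

func main() {
	fmt.Println(interleave([]string{
		"[2001:db8::1]:443", "[2001:db8::2]:443", "10.0.0.1:443", "10.0.0.2:443",
	}))
	// Prints v6, v4, v6, v4: no single family monopolizes the list.
}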
+func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + scd := sd.(*scData) + switch scd.rawConnectivityState { + case connectivity.Idle: + scd.subConn.Connect() + b.scheduleNextConnectionLocked() + return + case connectivity.TransientFailure: + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + scd.connectionFailedInFirstPass = true + lastErr = scd.lastErr + continue + case connectivity.Connecting: + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState) + return + + } + } + + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return + } + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. + if cancelled { + return + } + if b.logger.V(2) { + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() + } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes this + // SubConn. + if !b.isActiveSCData(sd) { + return + } + if newState.ConnectivityState == connectivity.Shutdown { + sd.effectiveState = connectivity.Shutdown + return + } + + // Record a connection attempt when exiting CONNECTING. 
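The Happy Eyeballs delay above relies on the cancellable-timer shape exposed as internal.TimeAfterFunc; a minimal editorial sketch of that pattern (250ms mirrors connectionDelayInterval):

package main

import (
	"fmt"
	"time"
)

// timeAfterFunc runs f after d and hands the caller a cancel func,
// mirroring the pattern above; tests can swap this out for a synchronous fake.
func timeAfterFunc(d time.Duration, f func()) (cancel func()) {
	t := time.AfterFunc(d, f)
	return func() { t.Stop() }
}

func main() {
	fired := make(chan struct{})
	cancel := timeAfterFunc(250*time.Millisecond, func() { close(fired) })
	defer cancel() // a no-op once the timer has fired
	<-fired
	fmt.Println("connection delay elapsed; move to the next address")
}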
+ if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) + } + + if newState.ConnectivityState == connectivity.Ready { + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + b.shutdownRemainingLocked(sd) + if !b.addressList.seekTo(sd.addr) { + // This should not fail as we should have only one SubConn after + // entering READY. The SubConn should be present in the addressList. + b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) + return + } + if !b.healthCheckingEnabled { + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) + } + + sd.effectiveState = connectivity.Ready + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) + } + // Send a CONNECTING update to take the SubConn out of sticky-TF if + // required. + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { + b.updateSubConnHealthState(sd, scs) + }) + return + } + + // If the LB policy is READY, and it receives a subchannel state change, + // it means that the READY subchannel has failed. + // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + sd.effectiveState = newState.ConnectivityState + // READY SubConn interspliced in between CONNECTING and IDLE, need to + // account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it. + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) + b.addressList.reset() + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. 
+ if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + } + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. + case connectivity.Idle: + sd.subConn.Connect() + } +} + +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if !sd.connectionFailedInFirstPass { + return + } + } + b.firstPass = false + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.rawConnectivityState == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { + activeSD, found := b.subConns.Get(sd.addr) + return found && activeSD == sd +} + +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes + // this SubConn. 
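Note: endFirstPassIfPossibleLocked above only ends the first pass once the address cursor has run off the list and every SubConn has reported at least one failure. A toy sketch of that bookkeeping, decoupled from the balancer types (names are invented for illustration):

    package main

    import "fmt"

    // passTracker records which addresses have failed during the first pass.
    type passTracker struct {
        failed map[string]bool // address -> failed at least once
    }

    func newPassTracker(addrs []string) *passTracker {
        t := &passTracker{failed: make(map[string]bool, len(addrs))}
        for _, a := range addrs {
            t.failed[a] = false
        }
        return t
    }

    func (t *passTracker) recordFailure(addr string) { t.failed[addr] = true }

    // firstPassDone reports whether every address has failed at least once,
    // the point at which pick_first reports TRANSIENT_FAILURE and switches
    // to its second, keep-retrying pass.
    func (t *passTracker) firstPassDone() bool {
        for _, f := range t.failed {
            if !f {
                return false
            }
        }
        return true
    }

    func main() {
        t := newPassTracker([]string{"10.0.0.1:443", "10.0.0.2:443"})
        t.recordFailure("10.0.0.1:443")
        fmt.Println(t.firstPassDone()) // false: one address still untried
        t.recordFailure("10.0.0.2:443")
        fmt.Println(t.firstPassDone()) // true
    }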
+ if !b.isActiveSCData(sd) { + return + } + sd.effectiveState = state.ConnectivityState + switch state.ConnectivityState { + case connectivity.Ready: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + case connectivity.TransientFailure: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, + }) + case connectivity.Connecting: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + default: + b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) + } +} + +// updateBalancerState stores the state reported to the channel and calls +// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate +// updates to the channel. +func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { + // In case of TransientFailures allow the picker to be updated to update + // the connectivity error, in all other cases don't send duplicate state + // updates. + if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { + return + } + b.forceUpdateConcludedStateLocked(newState) +} + +// forceUpdateConcludedStateLocked stores the state reported to the channel and +// calls ClientConn.UpdateState(). +// A separate function is defined to force update the ClientConn state since the +// channel doesn't correctly assume that LB policies start in CONNECTING and +// relies on LB policy to send an initial CONNECTING update. +func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { + b.state = newState.ConnectivityState + b.cc.UpdateState(newState) +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + exitIdle func() +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.exitIdle() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +// addressList manages sequentially iterating over addresses present in a list +// of endpoints. It provides a 1 dimensional view of the addresses present in +// the endpoints. +// This type is not safe for concurrent access. +type addressList struct { + addresses []resolver.Address + idx int +} + +func (al *addressList) isValid() bool { + return al.idx < len(al.addresses) +} + +func (al *addressList) size() int { + return len(al.addresses) +} + +// increment moves to the next index in the address list. +// This method returns false if it went off the list, true otherwise. +func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. 
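Note: the addressList helper added in this file is a plain cursor over the flattened address slice: isValid reports whether the cursor is still on the list, increment advances it, and seekTo repositions it on a match. A minimal sketch of the same cursor semantics over strings (illustrative only, not the vendored type):

    package main

    import "fmt"

    type cursor struct {
        items []string
        idx   int
    }

    func (c *cursor) valid() bool { return c.idx < len(c.items) }

    // next advances the cursor and reports whether it is still on the list.
    func (c *cursor) next() bool {
        if !c.valid() {
            return false
        }
        c.idx++
        return c.valid()
    }

    func (c *cursor) current() (string, bool) {
        if !c.valid() {
            return "", false
        }
        return c.items[c.idx], true
    }

    // seekTo positions the cursor on item and reports whether it was found;
    // on a miss the cursor is left unchanged.
    func (c *cursor) seekTo(item string) bool {
        for i, it := range c.items {
            if it == item {
                c.idx = i
                return true
            }
        }
        return false
    }

    func main() {
        c := cursor{items: []string{"a:1", "b:1", "c:1"}}
        for v, ok := c.current(); ok; v, ok = c.current() {
            fmt.Println(v)
            c.next()
        }
        fmt.Println(c.seekTo("b:1"), c.idx) // true 1
    }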
+func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. +func (al *addressList) seekTo(needle resolver.Address) bool { + for ai, addr := range al.addresses { + if !equalAddressIgnoringBalAttributes(&addr, &needle) { + continue + } + al.idx = ai + return true + } + return false +} + +// hasNext returns whether incrementing the addressList will result in moving +// past the end of the list. If the list has already moved past the end, it +// returns false. +func (al *addressList) hasNext() bool { + if !al.isValid() { + return false + } + return al.idx+1 < len(al.addresses) +} + +// equalAddressIgnoringBalAttributes returns true is a and b are considered +// equal. This is different from the Equal method on the resolver.Address type +// which considers all fields to determine equality. Here, we only consider +// fields that are meaningful to the SubConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} diff --git a/src/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/src/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 260255d31..80a42d225 100644 --- a/src/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/src/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "math/rand" + rand "math/rand/v2" "sync/atomic" "google.golang.org/grpc/balancer" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(rand.Intn(len(scs))), + next: uint32(rand.IntN(len(scs))), } } diff --git a/src/vendor/google.golang.org/grpc/balancer/subconn.go b/src/vendor/google.golang.org/grpc/balancer/subconn.go new file mode 100644 index 000000000..ea27c4fa7 --- /dev/null +++ b/src/vendor/google.golang.org/grpc/balancer/subconn.go @@ -0,0 +1,134 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import ( + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/resolver" +) + +// A SubConn represents a single connection to a gRPC backend service. +// +// All SubConns start in IDLE, and will not try to connect. To trigger a +// connection attempt, Balancers must call Connect. 
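Note: the new balancer/subconn.go file that begins here documents (further down in this hunk) that custom LB policies should not implement SubConn from scratch; they embed a SubConn obtained from ClientConn.NewSubConn and override only what they need, and the added EnforceSubConnEmbedding field makes that mandatory. A hedged sketch of such a wrapper; the logging behaviour and names are invented for illustration:

    package example

    import (
        "log"

        "google.golang.org/grpc/balancer"
    )

    // loggingSubConn wraps a real SubConn produced by ClientConn.NewSubConn.
    // Because it embeds balancer.SubConn, it keeps satisfying the interface
    // even as gRPC adds new methods.
    type loggingSubConn struct {
        balancer.SubConn // must be non-nil, or calls will panic
    }

    // Connect logs the attempt and delegates to the embedded SubConn.
    func (s *loggingSubConn) Connect() {
        log.Println("connect requested")
        s.SubConn.Connect()
    }

    // wrap is a hypothetical helper a balancer might apply after NewSubConn.
    func wrap(sc balancer.SubConn) balancer.SubConn {
        return &loggingSubConn{SubConn: sc}
    }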
+// +// If the connection attempt fails, the SubConn will transition to +// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the +// connection attempt succeeds, it will transition to READY. +// +// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE. +// +// If a connection re-enters IDLE, Balancers must call Connect again to trigger +// a new connection attempt. +// +// Each SubConn contains a list of addresses. gRPC will try to connect to the +// addresses in sequence, and stop trying the remainder once the first +// connection is successful. However, this behavior is deprecated. SubConns +// should only use a single address. +// +// NOTICE: This interface is intended to be implemented by gRPC, or intercepted +// by custom load balancing poilices. Users should not need their own complete +// implementation of this interface -- they should always delegate to a SubConn +// returned by ClientConn.NewSubConn() by embedding it in their implementations. +// An embedded SubConn must never be nil, or runtime panics will occur. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully close, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. + // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() + // RegisterHealthListener registers a health listener that receives health + // updates for a Ready SubConn. Only one health listener can be registered + // at a time. A health listener should be registered each time the SubConn's + // connectivity state changes to READY. Registering a health listener when + // the connectivity state is not READY may result in undefined behaviour. + // This method must not be called synchronously while handling an update + // from a previously registered health listener. + RegisterHealthListener(func(SubConnState)) + // EnforceSubConnEmbedding is included to force implementers to embed + // another implementation of this interface, allowing gRPC to add methods + // without breaking users. + internal.EnforceSubConnEmbedding +} + +// A ProducerBuilder is a simple constructor for a Producer. 
It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address +} + +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer any diff --git a/src/vendor/google.golang.org/grpc/balancer_wrapper.go b/src/vendor/google.golang.org/grpc/balancer_wrapper.go index 8ad6ce2f0..905817b5f 100644 --- a/src/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/src/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -24,12 +24,14 @@ import ( "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) @@ -187,6 +189,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer), stateListener: opts.StateListener, + healthData: newHealthData(connectivity.Idle), } ac.acbw = acbw return acbw, nil @@ -252,12 +255,32 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { + internal.EnforceSubConnEmbedding ac *addrConn // read-only ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer + producersMu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer + + // Access to healthData is protected by healthMu. 
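Note: the healthData field introduced here (defined and consumed by RegisterHealthListener in the hunks that follow) acts as a generation token: every connectivity change or re-registration allocates a fresh healthData, and a scheduled callback only delivers an update if the pointer it captured is still the current one, so closures never need to be compared. A standalone sketch of that invalidation pattern, with invented names:

    package main

    import (
        "fmt"
        "sync"
    )

    type generation struct{} // only pointer identity matters

    type notifier struct {
        mu  sync.Mutex
        gen *generation // replaced whenever the old listener becomes stale
    }

    // register installs fn and returns a delivery func; in the real code the
    // delivery would be scheduled on a serializer rather than returned.
    func (n *notifier) register(fn func(msg string)) (deliver func(msg string)) {
        n.mu.Lock()
        defer n.mu.Unlock()
        g := &generation{}
        n.gen = g
        return func(msg string) {
            n.mu.Lock()
            defer n.mu.Unlock()
            if n.gen != g {
                return // a newer listener was registered; drop the update
            }
            fn(msg)
        }
    }

    func main() {
        var n notifier
        first := n.register(func(m string) { fmt.Println("first:", m) })
        second := n.register(func(m string) { fmt.Println("second:", m) })

        first("READY")  // dropped: superseded by the second registration
        second("READY") // printed
    }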
+ healthMu sync.Mutex + // healthData is stored as a pointer to detect when the health listener is + // dropped or updated. This is required as closures can't be compared for + // equality. + healthData *healthData +} + +// healthData holds data related to health state reporting. +type healthData struct { + // connectivityState stores the most recent connectivity state delivered + // to the LB policy. This is stored to avoid sending updates when the + // SubConn has already exited connectivity state READY. + connectivityState connectivity.State +} + +func newHealthData(s connectivity.State) *healthData { + return &healthData{connectivityState: s} } // updateState is invoked by grpc to push a subConn state update to the @@ -267,6 +290,9 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if ctx.Err() != nil || acbw.ccb.balancer == nil { return } + // Invalidate all producers on any state change. + acbw.closeProducers() + // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. @@ -274,17 +300,25 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if s == connectivity.Ready { setConnectedAddress(&scs, curAddr) } + // Invalidate the health listener by updating the healthData. + acbw.healthMu.Lock() + // A race may occur if a health listener is registered soon after the + // connectivity state is set but before the stateListener is called. + // Two cases may arise: + // 1. The new state is not READY: RegisterHealthListener has checks to + // ensure no updates are sent when the connectivity state is not + // READY. + // 2. The new state is READY: This means that the old state wasn't Ready. + // The RegisterHealthListener API mentions that a health listener + // must not be registered when a SubConn is not ready to avoid such + // races. When this happens, the LB policy would get health updates + // on the old listener. When the LB policy registers a new listener + // on receiving the connectivity update, the health updates will be + // sent to the new health listener. + acbw.healthData = newHealthData(scs.ConnectivityState) + acbw.healthMu.Unlock() + acbw.stateListener(scs) - acbw.ac.mu.Lock() - defer acbw.ac.mu.Unlock() - if s == connectivity.Ready { - // When changing states to READY, reset stateReadyChan. Wait until - // after we notify the LB policy's listener(s) in order to prevent - // ac.getTransport() from unblocking before the LB policy starts - // tracking the subchannel as READY. - close(acbw.ac.stateReadyChan) - acbw.ac.stateReadyChan = make(chan struct{}) - } }) } @@ -301,6 +335,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { + acbw.closeProducers() acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } @@ -308,9 +343,10 @@ func (acbw *acBalancerWrapper) Shutdown() { // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready") + } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) 
} @@ -335,8 +371,8 @@ type refCountedProducer struct { } func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() // Look up existing producer from this builder. pData := acbw.producers[pb] @@ -353,13 +389,64 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( // and delete the refCountedProducer from the map if the total reference // count goes to zero. unref := func() { - acbw.mu.Lock() + acbw.producersMu.Lock() + // If closeProducers has already closed this producer instance, refs is + // set to 0, so the check after decrementing will never pass, and the + // producer will not be double-closed. pData.refs-- if pData.refs == 0 { defer pData.close() // Run outside the acbw mutex delete(acbw.producers, pb) } - acbw.mu.Unlock() + acbw.producersMu.Unlock() } return pData.producer, grpcsync.OnceFunc(unref) } + +func (acbw *acBalancerWrapper) closeProducers() { + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() + for pb, pData := range acbw.producers { + pData.refs = 0 + pData.close() + delete(acbw.producers, pb) + } +} + +// RegisterHealthListener accepts a health listener from the LB policy. It sends +// updates to the health listener as long as the SubConn's connectivity state +// doesn't change and a new health listener is not registered. To invalidate +// the currently registered health listener, acbw updates the healthData. If a +// nil listener is registered, the active health listener is dropped. +func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + // listeners should not be registered when the connectivity state + // isn't Ready. This may happen when the balancer registers a listener + // after the connectivityState is updated, but before it is notified + // of the update. + if acbw.healthData.connectivityState != connectivity.Ready { + return + } + // Replace the health data to stop sending updates to any previously + // registered health listeners. + hd := newHealthData(connectivity.Ready) + acbw.healthData = hd + if listener == nil { + return + } + + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Don't send updates if a new listener is registered. + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + curHD := acbw.healthData + if curHD != hd { + return + } + listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + }) +} diff --git a/src/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/src/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 55bffaa77..9e9d08069 100644 --- a/src/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/src/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -274,11 +274,9 @@ type GrpcLogEntry struct { func (x *GrpcLogEntry) Reset() { *x = GrpcLogEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GrpcLogEntry) String() string { @@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {} func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -440,11 +438,9 @@ type ClientHeader struct { func (x *ClientHeader) Reset() { *x = ClientHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientHeader) String() string { @@ -455,7 +451,7 @@ func (*ClientHeader) ProtoMessage() {} func (x *ClientHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -509,11 +505,9 @@ type ServerHeader struct { func (x *ServerHeader) Reset() { *x = ServerHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerHeader) String() string { @@ -524,7 +518,7 @@ func (*ServerHeader) ProtoMessage() {} func (x *ServerHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -565,11 +559,9 @@ type Trailer struct { func (x *Trailer) Reset() { *x = Trailer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Trailer) String() string { @@ -580,7 +572,7 @@ func (*Trailer) ProtoMessage() {} func (x *Trailer) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -638,11 +630,9 @@ type Message struct { func (x *Message) Reset() { *x = Message{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Message) String() string { @@ -653,7 +643,7 @@ func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -713,11 +703,9 @@ type Metadata struct { func (x *Metadata) Reset() { *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Metadata) String() string { @@ -728,7 +716,7 @@ func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -762,11 +750,9 @@ type MetadataEntry struct { func (x *MetadataEntry) Reset() { *x = MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MetadataEntry) String() string { @@ -777,7 +763,7 @@ func (*MetadataEntry) ProtoMessage() {} func (x *MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -820,11 +806,9 @@ type Address struct { func (x *Address) Reset() { *x = Address{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address) String() string { @@ -835,7 +819,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1057,104 +1041,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() { if File_grpc_binlog_v1_binarylog_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GrpcLogEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ClientHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Trailer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Message); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), diff --git a/src/vendor/google.golang.org/grpc/clientconn.go b/src/vendor/google.golang.org/grpc/clientconn.go index 9c8850e3f..4f57b5543 100644 --- a/src/vendor/google.golang.org/grpc/clientconn.go +++ b/src/vendor/google.golang.org/grpc/clientconn.go @@ -775,10 +775,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) } } - var balCfg serviceconfig.LoadBalancingConfig - if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig - } + balCfg := cc.sc.lbConfig bw := cc.balancerWrapper cc.mu.Unlock() @@ -825,14 +822,13 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. } ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddresses(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateReadyChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -1141,10 +1137,15 @@ func (cc *ClientConn) Close() error { <-cc.resolverWrapper.serializer.Done() <-cc.balancerWrapper.serializer.Done() - + var wg sync.WaitGroup for ac := range conns { - ac.tearDown(ErrClientConnClosing) + wg.Add(1) + go func(ac *addrConn) { + defer wg.Done() + ac.tearDown(ErrClientConnClosing) + }(ac) } + wg.Wait() cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being @@ -1179,8 +1180,7 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. 
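Note: the ClientConn.Close hunk above now tears down all address connections concurrently instead of serially, waiting on a sync.WaitGroup before emitting the channelz trace event. The fan-out pattern in isolation (the per-connection shutdown work here is simulated):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type conn struct{ name string }

    // tearDown simulates per-connection shutdown work.
    func (c *conn) tearDown() {
        time.Sleep(10 * time.Millisecond)
        fmt.Println("closed", c.name)
    }

    func closeAll(conns []*conn) {
        var wg sync.WaitGroup
        for _, c := range conns {
            wg.Add(1)
            go func(c *conn) {
                defer wg.Done()
                c.tearDown()
            }(c)
        }
        wg.Wait() // every connection is down before we return
    }

    func main() {
        closeAll([]*conn{{name: "a"}, {name: "b"}, {name: "c"}})
        fmt.Println("channel closed")
    }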
// Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateReadyChan chan struct{} // closed and recreated on every READY state change. + state connectivity.State backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1251,6 +1251,8 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + // TODO: #7534 - Move re-resolution requests into the pick_first LB policy + // to ensure one resolution request per pass instead of per subconn failure. ac.cc.resolveNow(resolver.ResolveNowOptions{}) ac.mu.Lock() if acCtx.Err() != nil { @@ -1292,7 +1294,7 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// tryAllAddrs tries to create a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { @@ -1369,7 +1371,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, defer cancel() copts.ChannelzParent = ac.channelz - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) @@ -1443,7 +1445,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if !ac.scopts.HealthCheckEnabled { return } - healthCheckFunc := ac.cc.dopts.healthCheckFunc + healthCheckFunc := internal.HealthCheckFunc if healthCheckFunc == nil { // The health package is not imported to set health check function. // @@ -1475,7 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { } // Start the health checking stream. go func() { - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") @@ -1504,29 +1506,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } -// getTransport waits until the addrconn is ready and returns the transport. -// If the context expires first, returns an appropriate status. If the -// addrConn is stopped first, returns an Unavailable status error. -func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { - for ctx.Err() == nil { - ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateReadyChan - ac.mu.Unlock() - if state == connectivity.Ready { - return t, nil - } - if state == connectivity.Shutdown { - return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") - } - - select { - case <-ctx.Done(): - case <-sc: - } - } - return nil, status.FromContextError(ctx.Err()).Err() -} - // tearDown starts to tear down the addrConn. 
// // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct diff --git a/src/vendor/google.golang.org/grpc/codec.go b/src/vendor/google.golang.org/grpc/codec.go index e840858b7..959c2f99d 100644 --- a/src/vendor/google.golang.org/grpc/codec.go +++ b/src/vendor/google.golang.org/grpc/codec.go @@ -71,7 +71,7 @@ func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { if err != nil { return nil, err } - return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil + return mem.BufferSlice{mem.SliceBuffer(data)}, nil } func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { diff --git a/src/vendor/google.golang.org/grpc/credentials/tls.go b/src/vendor/google.golang.org/grpc/credentials/tls.go index 411435854..e163a473d 100644 --- a/src/vendor/google.golang.org/grpc/credentials/tls.go +++ b/src/vendor/google.golang.org/grpc/credentials/tls.go @@ -200,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{ // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{credinternal.CloneTLSConfig(c)} - tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + config := applyDefaults(c) + if config.GetConfigForClient != nil { + oldFn := config.GetConfigForClient + config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + cfgForClient, err := oldFn(hello) + if err != nil || cfgForClient == nil { + return cfgForClient, err + } + return applyDefaults(cfgForClient), nil + } + } + return &tlsCreds{config: config} +} + +func applyDefaults(c *tls.Config) *tls.Config { + config := credinternal.CloneTLSConfig(c) + config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos) // If the user did not configure a MinVersion and did not configure a // MaxVersion < 1.2, use MinVersion=1.2, which is required by // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 + if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) { + config.MinVersion = tls.VersionTLS12 } // If the user did not configure CipherSuites, use all "secure" cipher // suites reported by the TLS package, but remove some explicitly forbidden // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { + if config.CipherSuites == nil { for _, cs := range tls.CipherSuites() { if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + config.CipherSuites = append(config.CipherSuites, cs.ID) } } } - return tc + return config } // NewClientTLSFromCert constructs TLS credentials from the provided root diff --git a/src/vendor/google.golang.org/grpc/dialoptions.go b/src/vendor/google.golang.org/grpc/dialoptions.go index 2b285beee..7494ae591 100644 --- a/src/vendor/google.golang.org/grpc/dialoptions.go +++ b/src/vendor/google.golang.org/grpc/dialoptions.go @@ -87,7 +87,6 @@ type dialOptions struct { disableServiceConfig bool disableRetry bool disableHealthCheck bool - healthCheckFunc internal.HealthChecker minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. 
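Note: the credentials/tls.go hunk above factors the default-applying logic into applyDefaults and additionally wraps a user-supplied GetConfigForClient so that per-client configs receive the same defaults. A simplified sketch of that decorator shape using only crypto/tls; the real change also appends the H2 ALPN token and filters forbidden cipher suites, which is omitted here:

    package example

    import "crypto/tls"

    // withDefaults returns a copy of c with a minimum TLS version applied and
    // wraps GetConfigForClient so per-client configs are treated the same way.
    func withDefaults(c *tls.Config) *tls.Config {
        if c == nil {
            c = &tls.Config{}
        }
        cfg := c.Clone()
        if cfg.MinVersion == 0 && (cfg.MaxVersion == 0 || cfg.MaxVersion >= tls.VersionTLS12) {
            cfg.MinVersion = tls.VersionTLS12
        }
        if prev := cfg.GetConfigForClient; prev != nil {
            cfg.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
                perClient, err := prev(hello)
                if err != nil || perClient == nil {
                    return perClient, err
                }
                return withDefaults(perClient), nil
            }
        }
        return cfg
    }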
defaultServiceConfigRawJSON *string @@ -436,7 +435,7 @@ func WithTimeout(d time.Duration) DialOption { // option to true from the Control field. For a concrete example of how to do // this, see internal.NetDialerWithTCPKeepalive(). // -// For more information, please see [issue 23459] in the Go github repo. +// For more information, please see [issue 23459] in the Go GitHub repo. // // [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { @@ -445,10 +444,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp }) } -func init() { - internal.WithHealthCheckFunc = withHealthCheckFunc -} - // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it @@ -662,16 +657,6 @@ func WithDisableHealthCheck() DialOption { }) } -// withHealthCheckFunc replaces the default health check function with the -// provided one. It makes tests easier to change the health check function. -// -// For testing purpose only. -func withHealthCheckFunc(f internal.HealthChecker) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.healthCheckFunc = f - }) -} - func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ @@ -682,7 +667,6 @@ func defaultDialOptions() dialOptions { BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, - healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, diff --git a/src/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/src/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index 1d827dd5d..ad75313a1 100644 --- a/src/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/src/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats" ) func init() { @@ -34,7 +35,7 @@ var logger = grpclog.Component("metrics-registry") // DefaultMetrics are the default metrics registered through global metrics // registry. This is written to at initialization time only, and is read only // after initialization. -var DefaultMetrics = NewMetrics() +var DefaultMetrics = stats.NewMetricSet() // MetricDescriptor is the data for a registered metric. type MetricDescriptor struct { @@ -42,7 +43,7 @@ type MetricDescriptor struct { // (including any per call metrics). See // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions // for metric naming conventions. - Name Metric + Name string // The description of this metric. Description string // The unit (e.g. entries, seconds) of this metric. @@ -154,27 +155,27 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels . } // registeredMetrics are the registered metric descriptor names. -var registeredMetrics = make(map[Metric]bool) +var registeredMetrics = make(map[string]bool) // metricsRegistry contains all of the registered metrics. // // This is written to only at init time, and read only after that. 
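Note: the metricregistry.go hunks above replace the experimental Metric named type with plain strings, and the metrics.go hunks that follow keep Metrics and NewMetrics only as deprecated aliases of the stable stats.MetricSet. The set itself is copy-on-write; a minimal sketch of that shape (not the real stats.MetricSet implementation):

    package main

    import (
        "fmt"
        "maps"
    )

    // metricSet is an immutable set of metric names; Add returns a copy.
    type metricSet struct {
        m map[string]bool
    }

    func newMetricSet(names ...string) *metricSet {
        s := &metricSet{m: make(map[string]bool, len(names))}
        for _, n := range names {
            s.m[n] = true
        }
        return s
    }

    // Add returns a new set containing the receiver's metrics plus names.
    func (s *metricSet) Add(names ...string) *metricSet {
        n := &metricSet{m: make(map[string]bool, len(s.m)+len(names))}
        maps.Copy(n.m, s.m)
        for _, name := range names {
            n.m[name] = true
        }
        return n
    }

    func main() {
        def := newMetricSet("grpc.client.attempt.started")
        more := def.Add("grpc.client.attempt.duration")
        fmt.Println(len(def.m), len(more.m)) // 1 2: the original set is untouched
    }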
-var metricsRegistry = make(map[Metric]*MetricDescriptor) +var metricsRegistry = make(map[string]*MetricDescriptor) // DescriptorForMetric returns the MetricDescriptor from the global registry. // // Returns nil if MetricDescriptor not present. -func DescriptorForMetric(metric Metric) *MetricDescriptor { - return metricsRegistry[metric] +func DescriptorForMetric(metricName string) *MetricDescriptor { + return metricsRegistry[metricName] } -func registerMetric(name Metric, def bool) { - if registeredMetrics[name] { - logger.Fatalf("metric %v already registered", name) +func registerMetric(metricName string, def bool) { + if registeredMetrics[metricName] { + logger.Fatalf("metric %v already registered", metricName) } - registeredMetrics[name] = true + registeredMetrics[metricName] = true if def { - DefaultMetrics = DefaultMetrics.Add(name) + DefaultMetrics = DefaultMetrics.Add(metricName) } } @@ -256,8 +257,8 @@ func snapshotMetricsRegistryForTesting() func() { oldRegisteredMetrics := registeredMetrics oldMetricsRegistry := metricsRegistry - registeredMetrics = make(map[Metric]bool) - metricsRegistry = make(map[Metric]*MetricDescriptor) + registeredMetrics = make(map[string]bool) + metricsRegistry = make(map[string]*MetricDescriptor) maps.Copy(registeredMetrics, registeredMetrics) maps.Copy(metricsRegistry, metricsRegistry) diff --git a/src/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/src/vendor/google.golang.org/grpc/experimental/stats/metrics.go index 3221f7a63..ee1423605 100644 --- a/src/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/src/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -19,7 +19,7 @@ // Package stats contains experimental metrics/stats API's. package stats -import "maps" +import "google.golang.org/grpc/stats" // MetricsRecorder records on metrics derived from metric registry. type MetricsRecorder interface { @@ -40,75 +40,15 @@ type MetricsRecorder interface { RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) } -// Metric is an identifier for a metric. -type Metric string +// Metrics is an experimental legacy alias of the now-stable stats.MetricSet. +// Metrics will be deleted in a future release. +type Metrics = stats.MetricSet -// Metrics is a set of metrics to record. Once created, Metrics is immutable, -// however Add and Remove can make copies with specific metrics added or -// removed, respectively. -// -// Do not construct directly; use NewMetrics instead. -type Metrics struct { - // metrics are the set of metrics to initialize. - metrics map[Metric]bool -} +// Metric was replaced by direct usage of strings. +type Metric = string -// NewMetrics returns a Metrics containing Metrics. +// NewMetrics is an experimental legacy alias of the now-stable +// stats.NewMetricSet. NewMetrics will be deleted in a future release. func NewMetrics(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Metrics returns the metrics set. The returned map is read-only and must not -// be modified. -func (m *Metrics) Metrics() map[Metric]bool { - return m.metrics -} - -// Add adds the metrics to the metrics set and returns a new copy with the -// additional metrics. 
-func (m *Metrics) Add(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Join joins the metrics passed in with the metrics set, and returns a new copy -// with the merged metrics. -func (m *Metrics) Join(metrics *Metrics) *Metrics { - newMetrics := make(map[Metric]bool) - maps.Copy(newMetrics, m.metrics) - maps.Copy(newMetrics, metrics.metrics) - return &Metrics{ - metrics: newMetrics, - } -} - -// Remove removes the metrics from the metrics set and returns a new copy with -// the metrics removed. -func (m *Metrics) Remove(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - delete(newMetrics, metric) - } - return &Metrics{ - metrics: newMetrics, - } + return stats.NewMetricSet(metrics...) } diff --git a/src/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/src/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index 07df71e98..ed90060c3 100644 --- a/src/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go +++ b/src/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -101,6 +101,22 @@ var severityName = []string{ fatalLog: "FATAL", } +// sprintf is fmt.Sprintf. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintf = fmt.Sprintf + +// sprint is fmt.Sprint. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprint = fmt.Sprint + +// sprintln is fmt.Sprintln. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintln = fmt.Sprintln + +// exit is os.Exit. +// This var exists to make it possible to test functions calling os.Exit. +var exit = os.Exit + // loggerT is the default logger used by grpclog. type loggerT struct { m []*log.Logger @@ -111,7 +127,7 @@ type loggerT struct { func (g *loggerT) output(severity int, s string) { sevStr := severityName[severity] if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + g.m[severity].Output(2, sevStr+": "+s) return } // TODO: we can also include the logging component, but that needs more @@ -123,55 +139,79 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } +func (g *loggerT) printf(severity int, format string, args ...any) { + // Note the discard check is duplicated in each print func, rather than in + // output, to avoid the expensive Sprint calls. + // De-duplicating this by moving to output would be a significant performance regression! + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintf(format, args...)) +} + +func (g *loggerT) print(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprint(v...)) +} + +func (g *loggerT) println(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintln(v...)) +} + func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) + g.print(infoLog, args...) } func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) + g.println(infoLog, args...) 
} func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) + g.printf(infoLog, format, args...) } func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) + g.print(warningLog, args...) } func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) + g.println(warningLog, args...) } func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) + g.printf(warningLog, format, args...) } func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) + g.print(errorLog, args...) } func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) + g.println(errorLog, args...) } func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) + g.printf(errorLog, format, args...) } func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) + g.print(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) + g.println(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) + g.printf(fatalLog, format, args...) + exit(1) } func (g *loggerT) V(l int) bool { @@ -186,19 +226,42 @@ type LoggerV2Config struct { FormatJSON bool } +// combineLoggers returns a combined logger for both higher & lower severity logs, +// or only one if the other is io.Discard. +// +// This uses io.Discard instead of io.MultiWriter when all loggers +// are set to io.Discard. Both this package and the standard log package have +// significant optimizations for io.Discard, which io.MultiWriter lacks (as of +// this writing). +func combineLoggers(lower, higher io.Writer) io.Writer { + if lower == io.Discard { + return higher + } + if higher == io.Discard { + return lower + } + return io.MultiWriter(lower, higher) +} + // NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. // The infoW, warningW, and errorW writers are used to write log messages of // different severity levels. func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { - var m []*log.Logger flag := log.LstdFlags if c.FormatJSON { flag = 0 } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) + + warningW = combineLoggers(infoW, warningW) + errorW = combineLoggers(errorW, warningW) + + fatalW := errorW + + m := []*log.Logger{ + log.New(infoW, "", flag), + log.New(warningW, "", flag), + log.New(errorW, "", flag), + log.New(fatalW, "", flag), + } return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} } diff --git a/src/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/src/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index d92335445..26e16d919 100644 --- a/src/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/src/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
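Note: the grpclog loggerv2 hunks above add two optimizations: each print helper checks whether the target writer is io.Discard before paying for fmt formatting, and combineLoggers avoids io.MultiWriter when one side is io.Discard. A condensed sketch of both ideas (not the vendored logger itself):

    package main

    import (
        "fmt"
        "io"
        "log"
        "os"
    )

    // combine skips io.MultiWriter when either side is io.Discard, since the
    // io.Discard fast paths would otherwise be defeated.
    func combine(a, b io.Writer) io.Writer {
        if a == io.Discard {
            return b
        }
        if b == io.Discard {
            return a
        }
        return io.MultiWriter(a, b)
    }

    type leveledLogger struct {
        info, warn *log.Logger
    }

    // Infof skips the Sprintf entirely when info output is discarded.
    func (l *leveledLogger) Infof(format string, args ...any) {
        if l.info.Writer() == io.Discard {
            return
        }
        l.info.Output(2, "INFO: "+fmt.Sprintf(format, args...))
    }

    func main() {
        l := &leveledLogger{
            info: log.New(io.Discard, "", log.LstdFlags),
            warn: log.New(combine(io.Discard, os.Stderr), "", log.LstdFlags),
        }
        l.Infof("expensive %v", "formatting skipped")
        l.warn.Println("warnings still go to stderr")
    }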
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -99,11 +99,9 @@ type HealthCheckRequest struct { func (x *HealthCheckRequest) Reset() { *x = HealthCheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckRequest) String() string { @@ -114,7 +112,7 @@ func (*HealthCheckRequest) ProtoMessage() {} func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -146,11 +144,9 @@ type HealthCheckResponse struct { func (x *HealthCheckResponse) Reset() { *x = HealthCheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckResponse) String() string { @@ -161,7 +157,7 @@ func (*HealthCheckResponse) ProtoMessage() {} func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -260,32 +256,6 @@ func file_grpc_health_v1_health_proto_init() { if File_grpc_health_v1_health_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/src/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/src/vendor/google.golang.org/grpc/internal/backoff/backoff.go index b15cf482d..b6ae7f258 100644 --- a/src/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/src/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,7 +25,7 @@ package backoff import ( "context" "errors" - "math/rand" + rand "math/rand/v2" "time" grpcbackoff "google.golang.org/grpc/backoff" diff --git a/src/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/src/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go index 13821a926..85540f86a 100644 --- a/src/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go +++ b/src/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -33,6 +33,8 @@ type lbConfig struct { childConfig serviceconfig.LoadBalancingConfig } +// ChildName returns the name of the child balancer of the gracefulswitch +// Balancer. 
func ChildName(l serviceconfig.LoadBalancingConfig) string { return l.(*lbConfig).childBuilder.Name() } diff --git a/src/vendor/google.golang.org/grpc/internal/channelz/channel.go b/src/vendor/google.golang.org/grpc/internal/channelz/channel.go index d7e9e1d54..3ec662799 100644 --- a/src/vendor/google.golang.org/grpc/internal/channelz/channel.go +++ b/src/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -43,6 +43,8 @@ type Channel struct { // Non-zero traceRefCount means the trace of this channel cannot be deleted. traceRefCount int32 + // ChannelMetrics holds connectivity state, target and call metrics for the + // channel within channelz. ChannelMetrics ChannelMetrics } @@ -50,6 +52,8 @@ type Channel struct { // nesting. func (c *Channel) channelzIdentifier() {} +// String returns a string representation of the Channel, including its parent +// entity and ID. func (c *Channel) String() string { if c.Parent == nil { return fmt.Sprintf("Channel #%d", c.ID) @@ -61,24 +65,31 @@ func (c *Channel) id() int64 { return c.ID } +// SubChans returns a copy of the map of sub-channels associated with the +// Channel. func (c *Channel) SubChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.subChans) } +// NestedChans returns a copy of the map of nested channels associated with the +// Channel. func (c *Channel) NestedChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.nestedChans) } +// Trace returns a copy of the Channel's trace data. func (c *Channel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() return c.trace.copy() } +// ChannelMetrics holds connectivity state, target and call metrics for the +// channel within channelz. type ChannelMetrics struct { // The current connectivity state of the channel. State atomic.Pointer[connectivity.State] @@ -136,12 +147,16 @@ func strFromPointer(s *string) string { return *s } +// String returns a string representation of the ChannelMetrics, including its +// state, target, and call metrics. func (c *ChannelMetrics) String() string { return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), ) } +// NewChannelMetricForTesting creates a new instance of ChannelMetrics with +// specified initial values for testing purposes. func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { c := &ChannelMetrics{} c.State.Store(&state) diff --git a/src/vendor/google.golang.org/grpc/internal/channelz/server.go b/src/vendor/google.golang.org/grpc/internal/channelz/server.go index cdfc49d6e..b5a824992 100644 --- a/src/vendor/google.golang.org/grpc/internal/channelz/server.go +++ b/src/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se return sm } +// CopyFrom copies the metrics data from the provided ServerMetrics +// instance into the current instance. 
func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { sm.CallsStarted.Store(o.CallsStarted.Load()) sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) diff --git a/src/vendor/google.golang.org/grpc/internal/channelz/socket.go b/src/vendor/google.golang.org/grpc/internal/channelz/socket.go index fa64834b2..90103847c 100644 --- a/src/vendor/google.golang.org/grpc/internal/channelz/socket.go +++ b/src/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct { RemoteFlowControlWindow int64 } +// SocketType represents the type of socket. type SocketType string +// SocketType can be one of these. const ( SocketTypeNormal = "NormalSocket" SocketTypeListen = "ListenSocket" ) +// Socket represents a socket within channelz which includes socket +// metrics and data related to socket activity and provides methods +// for managing and interacting with sockets. type Socket struct { Entity SocketType SocketType @@ -100,6 +105,8 @@ type Socket struct { Security credentials.ChannelzSecurityValue } +// String returns a string representation of the Socket, including its parent +// entity, socket type, and ID. func (ls *Socket) String() string { return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) } diff --git a/src/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/src/vendor/google.golang.org/grpc/internal/channelz/subchannel.go index 3b88e4cba..b20802e6e 100644 --- a/src/vendor/google.golang.org/grpc/internal/channelz/subchannel.go +++ b/src/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 { return sc.ID } +// Sockets returns a copy of the sockets map associated with the SubChannel. func (sc *SubChannel) Sockets() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(sc.sockets) } +// Trace returns a copy of the ChannelTrace associated with the SubChannel. func (sc *SubChannel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() diff --git a/src/vendor/google.golang.org/grpc/internal/channelz/trace.go b/src/vendor/google.golang.org/grpc/internal/channelz/trace.go index 36b867403..2bffe4777 100644 --- a/src/vendor/google.golang.org/grpc/internal/channelz/trace.go +++ b/src/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -79,13 +79,21 @@ type TraceEvent struct { Parent *TraceEvent } +// ChannelTrace provides tracing information for a channel. +// It tracks various events and metadata related to the channel's lifecycle +// and operations. type ChannelTrace struct { - cm *channelMap - clearCalled bool + cm *channelMap + clearCalled bool + // The time when the trace was created. CreationTime time.Time - EventNum int64 - mu sync.Mutex - Events []*traceEvent + // A counter for the number of events recorded in the + // trace. + EventNum int64 + mu sync.Mutex + // A slice of traceEvent pointers representing the events recorded for + // this channel. 
+ Events []*traceEvent } func (c *ChannelTrace) copy() *ChannelTrace { @@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{ RefNormalSocket: "NormalSocket", } +// String returns a string representation of the RefChannelType func (r RefChannelType) String() string { return refChannelTypeToString[r] } diff --git a/src/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/src/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 452985f8d..6e7dd6b77 100644 --- a/src/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/src/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -50,6 +50,11 @@ var ( // xDS fallback is turned on. If this is unset or is false, only the first // xDS server in the list of server configs will be used. XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) + // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used + // instead of the exiting pickfirst implementation. This can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" + // to "true". + NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/src/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/src/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 19b9d6392..8e8e86128 100644 --- a/src/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/src/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,7 +53,7 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// TrySchedule tries to schedules the provided callback function f to be +// TrySchedule tries to schedule the provided callback function f to be // executed in the order it was added. This is a best-effort operation. If the // context passed to NewCallbackSerializer was canceled before this method is // called, the callback will not be scheduled. diff --git a/src/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/src/vendor/google.golang.org/grpc/internal/grpcutil/method.go index ec62b4775..683d1955c 100644 --- a/src/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/src/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) { } // baseContentType is the base content-type for gRPC. This is a valid -// content-type on it's own, but can also include a content-subtype such as +// content-type on its own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. diff --git a/src/vendor/google.golang.org/grpc/internal/idle/idle.go b/src/vendor/google.golang.org/grpc/internal/idle/idle.go index fe49cb74c..2c13ee9da 100644 --- a/src/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/src/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool { return true } +// EnterIdleModeForTesting instructs the channel to enter idle mode. func (m *Manager) EnterIdleModeForTesting() { m.tryEnterIdleMode() } @@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error { // came in and OnCallBegin() noticed that the calls count is negative. 
// - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. + // here. The first one to get the lock would get the channel to exit idle. // - Channel is not in idle mode, and the user calls Connect which calls // m.ExitIdleMode. // @@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } +// Close stops the timer associated with the Manager, if it exists. func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) diff --git a/src/vendor/google.golang.org/grpc/internal/internal.go b/src/vendor/google.golang.org/grpc/internal/internal.go index 7aae9240f..3afc18134 100644 --- a/src/vendor/google.golang.org/grpc/internal/internal.go +++ b/src/vendor/google.golang.org/grpc/internal/internal.go @@ -29,8 +29,6 @@ import ( ) var ( - // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -149,6 +147,20 @@ var ( // other features, including the CSDS service. NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + // NewXDSResolverWithClientForTesting creates a new xDS resolver builder + // using the provided xDS client instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment // variable. @@ -191,6 +203,8 @@ var ( // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ExitIdleModeForTesting any // func(*grpc.ClientConn) error + // ChannelzTurnOffForTesting disables the Channelz service for testing + // purposes. ChannelzTurnOffForTesting func() // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to @@ -205,10 +219,6 @@ var ( // default resolver scheme. UserSetDefaultScheme = false - // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n - // is the number of elements. swap swaps the elements with indexes i and j. - ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) - // ConnectedAddress returns the connected address for a SubConnState. The // address is only valid if the state is READY. ConnectedAddress any // func (scs SubConnState) resolver.Address @@ -235,7 +245,7 @@ var ( // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report -// it's health back by calling setConnectivityState(). +// its health back by calling setConnectivityState(). 
// // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md @@ -257,3 +267,9 @@ const ( // It currently has an experimental suffix which would be removed once // end-to-end testing of the policy is completed. const RLSLoadBalancingPolicyName = "rls_experimental" + +// EnforceSubConnEmbedding is used to enforce proper SubConn implementation +// embedding. +type EnforceSubConnEmbedding interface { + enforceSubConnEmbedding() +} diff --git a/src/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/src/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 4552db16b..ba5c5a95d 100644 --- a/src/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/src/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,8 +24,9 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + rand "math/rand/v2" "net" + "net/netip" "os" "strconv" "strings" @@ -122,7 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts } // IP address. - if ipAddr, ok := formatIP(host); ok { + if ipAddr, err := formatIP(host); err == nil { addr := []resolver.Address{{Addr: ipAddr + ":" + port}} cc.UpdateState(resolver.State{Addresses: addr}) return deadResolver{}, nil @@ -177,7 +178,7 @@ type dnsResolver struct { // finished. Otherwise, data race will be possible. [Race Example] in // dns_resolver_test we replace the real lookup functions with mocked ones to // facilitate testing. If Close() doesn't wait for watcher() goroutine - // finishes, race detector sometimes will warns lookup (READ the lookup + // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup @@ -237,7 +238,9 @@ func (d *dnsResolver) watcher() { } func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { - if !EnableSRVLookups { + // Skip this particular host to avoid timeouts with some versions of + // systemd-resolved. + if !EnableSRVLookups || d.host == "metadata.google.internal." { return nil, nil } var newAddrs []resolver.Address @@ -258,9 +261,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) return nil, err } for _, a := range lbAddrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) @@ -320,9 +323,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error } newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) @@ -349,19 +352,19 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of -// an IP address. If addr is an IPv4 address, return the addr and ok = true. 
+// formatIP returns an error if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and error = nil. // If addr is an IPv6 address, return the addr enclosed in square brackets and -// ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false +// error = nil. +func formatIP(addr string) (string, error) { + ip, err := netip.ParseAddr(addr) + if err != nil { + return "", err } - if ip.To4() != nil { - return addr, true + if ip.Is4() { + return addr, nil } - return "[" + addr + "]", true + return "[" + addr + "]", nil } // parseTarget takes the user input target string and default port, returns @@ -377,7 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { return "", "", internal.ErrMissingAddr } - if ip := net.ParseIP(target); ip != nil { + if _, err := netip.ParseAddr(target); err == nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } @@ -425,7 +428,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return rand.Intn(100)+1 <= *a + return rand.IntN(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/src/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/src/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index be110d41f..79044657b 100644 --- a/src/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/src/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -54,6 +54,8 @@ func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { } } +// RecordInt64Count records the measurement alongside labels on the int +// count associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -62,6 +64,8 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordFloat64Count records the measurement alongside labels on the float +// count associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -70,6 +74,8 @@ func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHand } } +// RecordInt64Histo records the measurement alongside labels on the int +// histo associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -78,6 +84,8 @@ func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, } } +// RecordFloat64Histo records the measurement alongside labels on the float +// histo associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -86,6 +94,8 @@ func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHand } } +// RecordInt64Gauge records the measurement alongside labels on the int +// gauge associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) 
diff --git a/src/vendor/google.golang.org/grpc/internal/status/status.go b/src/vendor/google.golang.org/grpc/internal/status/status.go index 757925381..1186f1e9a 100644 --- a/src/vendor/google.golang.org/grpc/internal/status/status.go +++ b/src/vendor/google.golang.org/grpc/internal/status/status.go @@ -149,6 +149,8 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. +// If the detail can be decoded, the proto message returned is of the same +// type that was given to WithDetails(). func (s *Status) Details() []any { if s == nil || s.s == nil { return nil @@ -160,7 +162,38 @@ func (s *Status) Details() []any { details = append(details, err) continue } - details = append(details, detail) + // The call to MessageV1Of is required to unwrap the proto message if + // it implemented only the MessageV1 API. The proto message would have + // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are + // added to a global registry used by any.UnmarshalNew(). + // MessageV1Of has the following behaviour: + // 1. If the given message is a wrapped MessageV1, it returns the + // unwrapped value. + // 2. If the given message already implements MessageV1, it returns it + // as is. + // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper. + // + // Since the Status.WithDetails() API only accepts MessageV1, calling + // MessageV1Of ensures we return the same type that was given to + // WithDetails: + // * If the give type implemented only MessageV1, the unwrapping from + // point 1 above will restore the type. + // * If the given type implemented both MessageV1 and MessageV2, point 2 + // above will ensure no wrapping is performed. + // * If the given type implemented only MessageV2 and was wrapped using + // MessageV1Of before passing to WithDetails(), it would be unwrapped + // in WithDetails by calling MessageV2Of(). Point 3 above will ensure + // that the type is wrapped in a MessageV1 wrapper again before + // returning. Note that protoc-gen-go doesn't generate code which + // implements ONLY MessageV2 at the time of writing. + // + // NOTE: Status details can also be added using the FromProto method. + // This could theoretically allow passing a Detail message that only + // implements the V2 API. In such a case the message will be wrapped in + // a MessageV1 wrapper when fetched using Details(). + // Since protoc-gen-go generates only code that implements both V1 and + // V2 APIs for backward compatibility, this is not a concern. + details = append(details, protoadapt.MessageV1Of(detail)) } return details } diff --git a/src/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/src/vendor/google.golang.org/grpc/internal/transport/client_stream.go new file mode 100644 index 000000000..8ed347c54 --- /dev/null +++ b/src/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync/atomic" + + "golang.org/x/net/http2" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ClientStream implements streaming functionality for a gRPC client. +type ClientStream struct { + *Stream // Embed for common stream functionality. + + ct *http2Client + done chan struct{} // closed at the end of stream to unblock writers. + doneFunc func() // invoked at the end of stream. + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). + headerValid bool + header metadata.MD // the received header metadata + noHeaders bool // set if the client never received headers (set only after the stream is done). + + bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream + + status *status.Status // the status error received from the server +} + +// Read reads an n byte message from the input stream. +func (s *ClientStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.ct.incrMsgRecv() + } + return b, err +} + +// Close closes the stream and popagates err to any readers. +func (s *ClientStream) Close(err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.ct.write(s, hdr, data, opts) +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *ClientStream) BytesReceived() bool { + return s.bytesReceived.Load() +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *ClientStream) Unprocessed() bool { + return s.unprocessed.Load() +} + +func (s *ClientStream) waitOnHeader() { + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.Close(ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ClientStream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *ClientStream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. Acquires the key-value +// pairs of header metadata once it is available. It blocks until i) the +// metadata is ready or ii) there is no header metadata or iii) the stream is +// canceled/expired. 
+func (s *ClientStream) Header() (metadata.MD, error) { + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. +func (s *ClientStream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed. +func (s *ClientStream) Status() *status.Status { + return s.status +} diff --git a/src/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/src/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index 97198c515..dfc0f224e 100644 --- a/src/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/src/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 { func (f *trInFlow) onData(n uint32) uint32 { f.unacked += n - if f.unacked >= f.limit/4 { - w := f.unacked - f.unacked = 0 + if f.unacked < f.limit/4 { f.updateEffectiveWindowSize() - return w + return 0 } - f.updateEffectiveWindowSize() - return 0 + return f.reset() } func (f *trInFlow) reset() uint32 { diff --git a/src/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/src/vendor/google.golang.org/grpc/internal/transport/handler_server.go index ce878693b..d9305a65d 100644 --- a/src/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/src/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -225,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error { } } -func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { +func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error { ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() @@ -289,14 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro // writePendingHeaders sets common and custom headers on the first // write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { +func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) { ht.writeCommonHeaders(s) ht.writeCustomHeaders(s) } // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). 
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -317,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // writeCustomHeaders sets custom headers set on the stream via SetHeader // on the first write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) { h := ht.rw.Header() s.hdrMu.Lock() @@ -333,7 +333,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { // Always take a reference because otherwise there is no guarantee the data will // be available after this function returns. This is what callers to Write // expect. @@ -357,7 +357,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSl return nil } -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { +func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error { if err := s.SetHeader(md); err != nil { return err } @@ -385,7 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc if ht.timeoutSet { @@ -408,16 +408,18 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - ctx: ctx, - requestRead: func(int) {}, + s := &ServerStream{ + Stream: &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + buf: newRecvBuffer(), + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + }, cancel: cancel, - buf: newRecvBuffer(), st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } s.trReader = &transportReader{ @@ -471,9 +473,7 @@ func (ht *serverHandlerTransport) runStream() { } } -func (ht *serverHandlerTransport) IncrMsgSent() {} - -func (ht *serverHandlerTransport) IncrMsgRecv() {} +func (ht *serverHandlerTransport) incrMsgRecv() {} func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") diff --git a/src/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/src/vendor/google.golang.org/grpc/internal/transport/http2_client.go index c769deab5..f323ab7f4 100644 --- a/src/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/src/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -86,9 +86,9 @@ type http2Client struct { writerDone chan struct{} // sync point to enable testing. 
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer + goAway chan struct{} + keepaliveDone chan struct{} // Closed when the keepalive goroutine exits. + framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. // Do not access controlBuf with mu held. @@ -123,7 +123,7 @@ type http2Client struct { mu sync.Mutex // guard the following variables nextID uint32 state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ClientStream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -199,10 +199,10 @@ func isTemporary(err error) bool { return true } -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { +func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -335,10 +335,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), + keepaliveDone: make(chan struct{}), framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, @@ -479,17 +480,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. - s := &Stream{ - ct: t, - done: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - headerChan: make(chan struct{}), - contentSubtype: callHdr.ContentSubtype, - doneFunc: callHdr.DoneFunc, + s := &ClientStream{ + Stream: &Stream{ + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + contentSubtype: callHdr.ContentSubtype, + }, + ct: t, + done: make(chan struct{}), + headerChan: make(chan struct{}), + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -505,7 +508,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { ctxDone: s.ctx.Done(), recv: s.buf, closeStream: func(err error) { - t.CloseStream(s, err) + s.Close(err) }, }, windowHandler: func(n int) { @@ -527,8 +530,9 @@ func (t *http2Client) getPeer() *peer.Peer { // to be the last frame loopy writes to the transport. 
func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { t.mu.Lock() - defer t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + maxStreamID := t.nextID - 2 + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil { return false, err } return false, g.closeConn @@ -595,12 +599,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for k, v := range callAuthData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } - if b := stats.OutgoingTags(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) - } - if b := stats.OutgoingTrace(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) - } if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string @@ -736,7 +734,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { ctx = peer.NewContext(ctx, t.getPeer()) // ServerName field of the resolver returned address takes precedence over @@ -761,7 +759,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return } // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) s.write(recvMsg{err: err}) close(s.done) // If headerChan isn't closed, then close it. @@ -906,21 +904,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return s, nil } -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. -func (t *http2Client) CloseStream(s *Stream, err error) { - var ( - rst bool - rstCode http2.ErrCode - ) - if err != nil { - rst = true - rstCode = http2.ErrCodeCancel - } - t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) -} - -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { +func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { // If it was already done, return. If multiple closeStream calls @@ -1008,6 +992,9 @@ func (t *http2Client) Close(err error) { // should unblock it so that the goroutine eventually exits. t.kpDormancyCond.Signal() } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + goAwayDebugMessage := t.goAwayDebugMessage t.mu.Unlock() // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the @@ -1025,11 +1012,13 @@ func (t *http2Client) Close(err error) { } t.cancel() t.conn.Close() + // Waits for the reader and keepalive goroutines to exit before returning to + // ensure all resources are cleaned up before Close can return. 
+ <-t.readerDone + if t.keepaliveEnabled { + <-t.keepaliveDone + } channelz.RemoveEntry(t.channelz.ID) - // Append info about previous goaways if there were any, since this may be important - // for understanding the root cause for this connection to be closed. - _, goAwayDebugMessage := t.GetGoAwayReason() - var st *status.Status if len(goAwayDebugMessage) > 0 { st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) @@ -1078,7 +1067,7 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { +func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { reader := data.Reader() if opts.Last { @@ -1107,10 +1096,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *O _ = reader.Close() return err } + t.incrMsgSent() return nil } -func (t *http2Client) getStream(f http2.Frame) *Stream { +func (t *http2Client) getStream(f http2.Frame) *ClientStream { t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() @@ -1120,7 +1110,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { +func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1129,7 +1119,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream. // Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { +func (t *http2Client) updateWindow(s *ClientStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1235,7 +1225,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } if f.ErrCode == http2.ErrCodeRefusedStream { // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { @@ -1316,11 +1306,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { t.controlBuf.put(pingAck) } -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return + return nil } if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug @@ -1332,8 +1322,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) - return + return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id) } // A client can receive multiple GoAways from the server (see // https://github.com/grpc/grpc-go/issues/1387). 
The idea is that the first @@ -1350,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. if id > t.prevGoAwayID { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) - return + return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID) } default: t.setGoAwayReason(f) @@ -1375,15 +1363,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.prevGoAwayID = id if len(t.activeStreams) == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) - return + return connectionErrorf(true, nil, "received goaway and there are no active streams") } - streamsToClose := make([]*Stream, 0) + streamsToClose := make([]*ClientStream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) + stream.unprocessed.Store(true) streamsToClose = append(streamsToClose, stream) } } @@ -1393,6 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for _, stream := range streamsToClose { t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } + return nil } // setGoAwayReason sets the value of t.goAwayReason based @@ -1434,7 +1422,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } endStream := frame.StreamEnded() - atomic.StoreUint32(&s.bytesReceived, 1) + s.bytesReceived.Store(true) initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { @@ -1628,7 +1616,13 @@ func (t *http2Client) readServerPreface() error { // network connection. If the server preface is not read successfully, an // error is pushed to errCh; otherwise errCh is closed with no error. func (t *http2Client) reader(errCh chan<- error) { - defer close(t.readerDone) + var errClose error + defer func() { + close(t.readerDone) + if errClose != nil { + t.Close(errClose) + } + }() if err := t.readServerPreface(); err != nil { errCh <- err @@ -1669,7 +1663,7 @@ func (t *http2Client) reader(errCh chan<- error) { continue } // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + errClose = connectionErrorf(true, err, "error reading from server: %v", err) return } switch frame := frame.(type) { @@ -1684,7 +1678,7 @@ func (t *http2Client) reader(errCh chan<- error) { case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: - t.handleGoAway(frame) + errClose = t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: @@ -1697,6 +1691,13 @@ func (t *http2Client) reader(errCh chan<- error) { // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { + var err error + defer func() { + close(t.keepaliveDone) + if err != nil { + t.Close(err) + } + }() p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. 
outstandingPing := false @@ -1720,7 +1721,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout") return } t.mu.Lock() @@ -1791,14 +1792,18 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Client) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) + } } -func (t *http2Client) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) + } } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/src/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/src/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 584b50fe5..0055fddd7 100644 --- a/src/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/src/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,7 +25,7 @@ import ( "fmt" "io" "math" - "math/rand" + rand "math/rand/v2" "net" "net/http" "strconv" @@ -111,7 +111,7 @@ type http2Server struct { // already initialized since draining is already underway. drainEvent *grpcsync.Event state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ServerStream // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. @@ -256,7 +256,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ServerStream), stats: config.StatsHandlers, kp: kp, idle: time.Now(), @@ -359,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. 
-func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -385,11 +385,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.maxStreamID = streamID buf := newRecvBuffer() - s := &Stream{ - id: streamID, + s := &ServerStream{ + Stream: &Stream{ + id: streamID, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + }, st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, headerWireLength: int(frame.Header().Length), } var ( @@ -537,12 +539,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) - if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { - s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) - } - if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { - s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) - } } t.mu.Lock() if t.state != reachable { @@ -634,7 +630,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) { defer func() { close(t.readerDone) <-t.loopyWriterDone @@ -698,7 +694,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { } } -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { @@ -716,7 +712,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Server) adjustWindow(s *Stream, n uint32) { +func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -726,7 +722,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { +func (t *http2Server) updateWindow(s *ServerStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, @@ -963,7 +959,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { return true } -func (t *http2Server) streamContextErr(s *Stream) error { +func (t *http2Server) streamContextErr(s *ServerStream) error { select { case <-t.done: return ErrConnClosing @@ -973,7 +969,7 @@ func (t *http2Server) streamContextErr(s *Stream) error { } // WriteHeader sends the header metadata md back to the client. 
-func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { +func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() if s.getState() == streamDone { @@ -1006,7 +1002,7 @@ func (t *http2Server) setResetPingStrikes() { atomic.StoreUint32(&t.resetPingStrikes, 1) } -func (t *http2Server) writeHeaderLocked(s *Stream) error { +func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -1046,7 +1042,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { +func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() @@ -1117,11 +1113,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { reader := data.Reader() if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.WriteHeader(s, nil); err != nil { + if err := t.writeHeader(s, nil); err != nil { _ = reader.Close() return err } @@ -1147,6 +1143,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Opti _ = reader.Close() return err } + t.incrMsgSent() return nil } @@ -1276,7 +1273,7 @@ func (t *http2Server) Close(err error) { } // deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { +func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1297,7 +1294,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { +func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1321,7 +1318,7 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h } // closeStream clears the footprint of a stream when the stream is not needed any more. -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { +func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. 
@@ -1415,14 +1412,18 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { } } -func (t *http2Server) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) +func (t *http2Server) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) + } } -func (t *http2Server) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) +func (t *http2Server) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) + } } func (t *http2Server) getOutFlowWindow() int64 { @@ -1455,7 +1456,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := rand.Int63n(2*r) - r + j := rand.Int64N(2*r) - r return time.Duration(j) } diff --git a/src/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/src/vendor/google.golang.org/grpc/internal/transport/server_stream.go new file mode 100644 index 000000000..a22a90151 --- /dev/null +++ b/src/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -0,0 +1,178 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "errors" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ServerStream implements streaming functionality for a gRPC server. +type ServerStream struct { + *Stream // Embed for common stream functionality. + + st internalServerTransport + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. + clientAdvertisedCompressors string + headerWireLength int + + // hdrMu protects outgoing header and trailer metadata. + hdrMu sync.Mutex + header metadata.MD // the outgoing header metadata. Updated by WriteHeader. + headerSent atomic.Bool // atomically set when the headers are sent out. +} + +// Read reads an n byte message from the input stream. +func (s *ServerStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.st.incrMsgRecv() + } + return b, err +} + +// SendHeader sends the header metadata for the given stream. +func (s *ServerStream) SendHeader(md metadata.MD) error { + return s.st.writeHeader(s, md) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.st.write(s, hdr, data, opts) +} + +// WriteStatus sends the status of a stream to the client. 
WriteStatus is +// the final call made on a stream and always occurs. +func (s *ServerStream) WriteStatus(st *status.Status) error { + return s.st.writeStatus(s, st) +} + +// isHeaderSent indicates whether headers have been sent. +func (s *ServerStream) isHeaderSent() bool { + return s.headerSent.Load() +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. +func (s *ServerStream) updateHeaderSent() bool { + return s.headerSent.Swap(true) +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ServerStream) RecvCompress() string { + return s.recvCompress +} + +// SendCompress returns the send compressor name. +func (s *ServerStream) SendCompress() string { + return s.sendCompress +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *ServerStream) ContentSubtype() string { + return s.contentSubtype +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *ServerStream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *ServerStream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *ServerStream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Header returns the header metadata of the stream. It returns the out header +// after t.WriteHeader is called. It does not block and must not be called +// until after WriteHeader. +func (s *ServerStream) Header() (metadata.MD, error) { + // Return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. +func (s *ServerStream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// This should not be called in parallel to other data writes. +func (s *ServerStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. +// This should not be called parallel to other data writes. 
+func (s *ServerStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} diff --git a/src/vendor/google.golang.org/grpc/internal/transport/transport.go b/src/vendor/google.golang.org/grpc/internal/transport/transport.go index fdd6fa86c..2859b8775 100644 --- a/src/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/src/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -27,7 +27,6 @@ import ( "fmt" "io" "net" - "strings" "sync" "sync/atomic" "time" @@ -39,7 +38,6 @@ import ( "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -133,7 +131,7 @@ type recvBufferReader struct { err error } -func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } @@ -142,9 +140,9 @@ func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { return n, nil } if r.closeStream != nil { - n, r.err = r.readHeaderClient(header) + n, r.err = r.readMessageHeaderClient(header) } else { - n, r.err = r.readHeader(header) + n, r.err = r.readMessageHeader(header) } return n, r.err } @@ -174,12 +172,12 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { return buf, r.err } -func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -192,7 +190,7 @@ func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -213,9 +211,9 @@ func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -246,7 +244,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { if m.buffer != nil { @@ -288,14 +286,8 @@ const ( // Stream represents an RPC in the transport layer. 
type Stream struct { id uint32 - st ServerTransport // nil for client side Stream - ct ClientTransport // nil for server side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - doneFunc func() // invoked at the end of stream on client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream + ctx context.Context // the associated context of the stream + method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer @@ -303,58 +295,17 @@ type Stream struct { fc *inFlow wq *writeQuota - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - state streamState - // On client-side it is the status error received from the server. - // On server-side it is unused. - status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string -} - -// isHeaderSent is only valid on the server-side. -func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} -// updateHeaderSent updates headerSent and returns true -// if it was already set. It is valid only on server-side. -func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 + trailer metadata.MD // the key-value map of trailer metadata. } func (s *Stream) swapState(st streamState) streamState { @@ -369,110 +320,12 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. 
- return - } - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.ct.CloseStream(s, ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() []string { - values := strings.Split(s.clientAdvertisedCompressors, ",") - for i, v := range values { - values[i] = strings.TrimSpace(v) - } - return values -} - -// Done returns a channel which is closed when it receives the final status -// from the server. -func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. It -// does not block and must not be called until after WriteHeader. -func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. - return s.header.Copy(), nil - } - s.waitOnHeader() - - if !s.headerValid || s.noHeaders { - return nil, s.status.Err() - } - - return s.header.Copy(), nil -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. Client-side only. -func (s *Stream) TrailersOnly() bool { - s.waitOnHeader() - return s.noHeaders -} - // Trailer returns the cached trailer metadata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. +// after the entire stream is done, it could return an empty MD. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. func (s *Stream) Trailer() metadata.MD { - c := s.trailer.Copy() - return c -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *Stream) ContentSubtype() string { - return s.contentSubtype + return s.trailer.Copy() } // Context returns the context of the stream. 
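Note on the transport.go hunks above: client- and server-specific state is stripped out of the shared Stream struct, and role-specific behaviour moves to wrappers that embed *Stream (the new server_stream.go earlier in this diff shows the server side). A toy illustration of that embedding pattern — all type and field names below are invented stand-ins, not the gRPC internals:

```go
package main

import "fmt"

// baseStream holds only state common to both ends, the way the slimmed-down
// transport.Stream keeps id, method, buffers and flow control.
type baseStream struct {
	id     uint32
	method string
}

// serverSide embeds the shared base and adds server-only concerns, the way
// ServerStream carries the outgoing header metadata and headerSent flag.
type serverSide struct {
	*baseStream
	headerSent bool
}

// clientSide adds client-only concerns, the way ClientStream keeps the
// received status and header channel.
type clientSide struct {
	*baseStream
	status string
}

func main() {
	b := &baseStream{id: 1, method: "/pkg.Svc/Do"}
	s := &serverSide{baseStream: b}
	c := &clientSide{baseStream: b, status: "OK"}
	// Shared fields are reached through the embedded base on both wrappers.
	fmt.Println(s.method, c.method, s.headerSent, c.status)
}
```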
@@ -480,81 +333,31 @@ func (s *Stream) Context() context.Context { return s.ctx } -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *Stream) SetContext(ctx context.Context) { - s.ctx = ctx -} - // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *Stream) Status() *status.Status { - return s.status -} - -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. Valid only on the server. -func (s *Stream) HeaderWireLength() int { - return s.headerWireLength -} - -// SetHeader sets the header metadata. This can be called multiple times. -// Server side only. -// This should not be called in parallel to other data writes. -func (s *Stream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. -func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) -} - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. Server side only. -// This should not be called parallel to other data writes. -func (s *Stream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() - return nil -} - func (s *Stream) write(m recvMsg) { s.buf.put(m) } -func (s *Stream) ReadHeader(header []byte) (err error) { +// ReadMessageHeader reads data into the provided header slice from the stream. +// It first checks if there was an error during a previous read operation and +// returns it if present. It then requests a read operation for the length of +// the header. It continues to read from the stream until the entire header +// slice is filled or an error occurs. If an `io.EOF` error is encountered with +// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an +// unexpected end of the stream. The method returns any error encountered during +// the read process or nil if the header was successfully read. +func (s *Stream) ReadMessageHeader(header []byte) (err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return er } s.requestRead(len(header)) for len(header) != 0 { - n, err := s.trReader.ReadHeader(header) + n, err := s.trReader.ReadMessageHeader(header) header = header[n:] if len(header) == 0 { err = nil @@ -570,7 +373,7 @@ func (s *Stream) ReadHeader(header []byte) (err error) { } // Read reads n bytes from the wire for this stream. 
-func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { +func (s *Stream) read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return nil, er @@ -610,13 +413,13 @@ type transportReader struct { er error } -func (t *transportReader) ReadHeader(header []byte) (int, error) { - n, err := t.reader.ReadHeader(header) +func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { + n, err := t.reader.ReadMessageHeader(header) if err != nil { t.er = err return 0, err } - t.windowHandler(len(header)) + t.windowHandler(n) return n, nil } @@ -630,17 +433,6 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { return buf, nil } -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *Stream) BytesReceived() bool { - return atomic.LoadUint32(&s.bytesReceived) == 1 -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. it sent a refused stream or GOAWAY including this stream ID. -func (s *Stream) Unprocessed() bool { - return atomic.LoadUint32(&s.unprocessed) == 1 -} - // GoString is implemented by Stream so context.String() won't // race when printing %#v. func (s *Stream) GoString() string { @@ -716,15 +508,9 @@ type ConnectOptions struct { BufferPool mem.BufferPool } -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) -} - -// Options provides additional hints and information for message +// WriteOptions provides additional hints and information for message // transmission. -type Options struct { +type WriteOptions struct { // Last indicates whether this write is the last piece for // this stream. Last bool @@ -773,18 +559,8 @@ type ClientTransport interface { // It does not block. GracefulClose() - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) + NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor @@ -804,12 +580,6 @@ type ClientTransport interface { // RemoteAddr returns the remote network address. RemoteAddr() net.Addr - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport @@ -819,19 +589,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. 
- HandleStreams(context.Context, func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - - // WriteStatus sends the status of a stream to the client. WriteStatus is - // the final call made on a stream and always occurs. - WriteStatus(s *Stream, st *status.Status) error + HandleStreams(context.Context, func(*ServerStream)) // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -843,12 +601,14 @@ type ServerTransport interface { // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) +} - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() +type internalServerTransport interface { + ServerTransport + writeHeader(s *ServerStream, md metadata.MD) error + write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error + writeStatus(s *ServerStream, st *status.Status) error + incrMsgRecv() } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/src/vendor/google.golang.org/grpc/mem/buffer_slice.go b/src/vendor/google.golang.org/grpc/mem/buffer_slice.go index 2d70b5a02..65002e2cc 100644 --- a/src/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/src/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -19,10 +19,14 @@ package mem import ( - "compress/flate" "io" ) +const ( + // 32 KiB is what io.Copy uses. + readAllBufSize = 32 * 1024 +) + // BufferSlice offers a means to represent data that spans one or more Buffer // instances. A BufferSlice is meant to be immutable after creation, and methods // like Ref create and return copies of the slice. This is why all methods have @@ -126,7 +130,8 @@ func (s BufferSlice) Reader() Reader { // Remaining(), which returns the number of unread bytes remaining in the slice. // Buffers will be freed as they are read. type Reader interface { - flate.Reader + io.Reader + io.ByteReader // Close frees the underlying BufferSlice and never returns an error. Subsequent // calls to Read will return (0, io.EOF). Close() error @@ -219,8 +224,58 @@ func (w *writer) Write(p []byte) (n int, err error) { // NewWriter wraps the given BufferSlice and BufferPool to implement the // io.Writer interface. Every call to Write copies the contents of the given -// buffer into a new Buffer pulled from the given pool and the Buffer is added to -// the given BufferSlice. +// buffer into a new Buffer pulled from the given pool and the Buffer is +// added to the given BufferSlice. func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { return &writer{buffers: buffers, pool: pool} } + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +// +// Important: A failed call returns a non-nil error and may also return +// partially read buffers. It is the responsibility of the caller to free the +// BufferSlice returned, or its memory will not be reused. 
+func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) { + var result BufferSlice + if wt, ok := r.(io.WriterTo); ok { + // This is more optimal since wt knows the size of chunks it wants to + // write and, hence, we can allocate buffers of an optimal size to fit + // them. E.g. might be a single big chunk, and we wouldn't chop it + // into pieces. + w := NewWriter(&result, pool) + _, err := wt.WriteTo(w) + return result, err + } +nextBuffer: + for { + buf := pool.Get(readAllBufSize) + // We asked for 32KiB but may have been given a bigger buffer. + // Use all of it if that's the case. + *buf = (*buf)[:cap(*buf)] + usedCap := 0 + for { + n, err := r.Read((*buf)[usedCap:]) + usedCap += n + if err != nil { + if usedCap == 0 { + // Nothing in this buf, put it back + pool.Put(buf) + } else { + *buf = (*buf)[:usedCap] + result = append(result, NewBuffer(buf, pool)) + } + if err == io.EOF { + err = nil + } + return result, err + } + if len(*buf) == usedCap { + result = append(result, NewBuffer(buf, pool)) + continue nextBuffer + } + } + } +} diff --git a/src/vendor/google.golang.org/grpc/mem/buffers.go b/src/vendor/google.golang.org/grpc/mem/buffers.go index 4d66b2ccc..ecbf0b9a7 100644 --- a/src/vendor/google.golang.org/grpc/mem/buffers.go +++ b/src/vendor/google.golang.org/grpc/mem/buffers.go @@ -65,6 +65,9 @@ var ( refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} ) +// IsBelowBufferPoolingThreshold returns true if the given size is less than or +// equal to the threshold for buffer pooling. This is used to determine whether +// to pool buffers or allocate them directly. func IsBelowBufferPoolingThreshold(size int) bool { return size <= bufferPoolingThreshold } @@ -89,7 +92,11 @@ func newBuffer() *buffer { // // Note that the backing array of the given data is not copied. func NewBuffer(data *[]byte, pool BufferPool) Buffer { - if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + // Use the buffer's capacity instead of the length, otherwise buffers may + // not be reused under certain conditions. For example, if a large buffer + // is acquired from the pool, but fewer bytes than the buffering threshold + // are written to it, the buffer will not be returned to the pool. + if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) { return (SliceBuffer)(*data) } b := newBuffer() @@ -194,19 +201,19 @@ func (b *buffer) read(buf []byte) (int, Buffer) { return n, b } -// String returns a string representation of the buffer. May be used for -// debugging purposes. func (b *buffer) String() string { return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) } +// ReadUnsafe reads bytes from the given Buffer into the provided slice. +// It does not perform safety checks. func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { return buf.read(dst) } // SplitUnsafe modifies the receiver to point to the first n bytes while it -// returns a new reference to the remaining bytes. The returned Buffer functions -// just like a normal reference acquired using Ref(). +// returns a new reference to the remaining bytes. The returned Buffer +// functions just like a normal reference acquired using Ref(). func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { return buf.split(n) } @@ -232,12 +239,21 @@ func (e emptyBuffer) read([]byte) (int, Buffer) { return 0, e } +// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides +// methods for reading, splitting, and managing the byte slice. 
type SliceBuffer []byte +// ReadOnlyData returns the byte slice. func (s SliceBuffer) ReadOnlyData() []byte { return s } -func (s SliceBuffer) Ref() {} -func (s SliceBuffer) Free() {} -func (s SliceBuffer) Len() int { return len(s) } + +// Ref is a noop implementation of Ref. +func (s SliceBuffer) Ref() {} + +// Free is a noop implementation of Free. +func (s SliceBuffer) Free() {} + +// Len is a noop implementation of Len. +func (s SliceBuffer) Len() int { return len(s) } func (s SliceBuffer) split(n int) (left, right Buffer) { return s[:n], s[n:] diff --git a/src/vendor/google.golang.org/grpc/preloader.go b/src/vendor/google.golang.org/grpc/preloader.go index e87a17f36..ee0ff969a 100644 --- a/src/vendor/google.golang.org/grpc/preloader.go +++ b/src/vendor/google.golang.org/grpc/preloader.go @@ -62,7 +62,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { materializedData := data.Materialize() data.Free() - p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)} // TODO: it should be possible to grab the bufferPool from the underlying // stream implementation with a type cast to its actual type (such as @@ -76,7 +76,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if p.pf.isCompressed() { materializedCompData := compData.Materialize() compData.Free() - compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)} } p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) diff --git a/src/vendor/google.golang.org/grpc/resolver/resolver.go b/src/vendor/google.golang.org/grpc/resolver/resolver.go index 202854511..8eb1cf3bc 100644 --- a/src/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/src/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,6 +22,7 @@ package resolver import ( "context" + "errors" "fmt" "net" "net/url" @@ -237,8 +238,8 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an - // error. The ClientConn will notify the load balancer and begin calling - // ResolveNow on the Resolver with exponential backoff. + // error. The ClientConn then forwards this error to the load balancing + // policy. ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. @@ -330,3 +331,20 @@ type AuthorityOverrider interface { // typically in line, and must keep it unchanged. OverrideAuthority(Target) string } + +// ValidateEndpoints validates endpoints from a petiole policy's perspective. +// Petiole policies should call this before calling into their children. See +// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md) +// for details. 
+func ValidateEndpoints(endpoints []Endpoint) error { + if len(endpoints) == 0 { + return errors.New("endpoints list is empty") + } + + for _, endpoint := range endpoints { + for range endpoint.Addresses { + return nil + } + } + return errors.New("endpoints list contains no addresses") +} diff --git a/src/vendor/google.golang.org/grpc/rpc_util.go b/src/vendor/google.golang.org/grpc/rpc_util.go index 2d96f1405..9fac2b08b 100644 --- a/src/vendor/google.golang.org/grpc/rpc_util.go +++ b/src/vendor/google.golang.org/grpc/rpc_util.go @@ -622,7 +622,7 @@ func (pf payloadFormat) isCompressed() bool { } type streamReader interface { - ReadHeader(header []byte) error + ReadMessageHeader(header []byte) error Read(n int) (mem.BufferSlice, error) } @@ -656,7 +656,7 @@ type parser struct { // that the underlying streamReader must not return an incompatible // error. func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { - err := p.r.ReadHeader(p.header[:]) + err := p.r.ReadMessageHeader(p.header[:]) if err != nil { return 0, nil, err } @@ -664,9 +664,6 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) - if length == 0 { - return pf, nil, nil - } if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } @@ -791,9 +788,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool if !haveCompressor { if isServer { return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } else { - return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -818,7 +814,7 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, ) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { @@ -842,7 +838,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei var uncompressedBuf []byte uncompressedBuf, err = dc.Do(compressed.Reader()) if err == nil { - out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)} } size = len(uncompressedBuf) } else { @@ -878,30 +874,7 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return nil, 0, err } - // TODO: Can/should this still be preserved with the new BufferSlice API? Are - // there any actual benefits to allocating a single large buffer instead of - // multiple smaller ones? 
- //if sizer, ok := compressor.(interface { - // DecompressedSize(compressedBytes []byte) int - //}); ok { - // if size := sizer.DecompressedSize(d); size >= 0 { - // if size > maxReceiveMessageSize { - // return nil, size, nil - // } - // // size is used as an estimate to size the buffer, but we - // // will read more data if available. - // // +MinRead so ReadFrom will not reallocate if size is correct. - // // - // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // // we can also utilize the recv buffer pool here. - // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - // return buf.Bytes(), int(bytesRead), err - // } - //} - - var out mem.BufferSlice - _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool) if err != nil { out.Free() return nil, 0, err @@ -909,10 +882,14 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return out, out.Len(), nil } +type recvCompressor interface { + RecvCompress() string +} + // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { +func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err diff --git a/src/vendor/google.golang.org/grpc/server.go b/src/vendor/google.golang.org/grpc/server.go index d1e1415a4..16065a027 100644 --- a/src/vendor/google.golang.org/grpc/server.go +++ b/src/vendor/google.golang.org/grpc/server.go @@ -87,12 +87,13 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +// MethodHandler is a function type that processes a unary RPC method call. +type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string - Handler methodHandler + Handler MethodHandler } // ServiceDesc represents an RPC service's specification. @@ -621,8 +622,8 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorker blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be +// serverWorker blocks on a *transport.ServerStream channel forever and waits +// for data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). 
// @@ -1020,7 +1021,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.Stream) { + st.HandleStreams(ctx, func(stream *transport.ServerStream) { s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1136,7 +1137,7 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) @@ -1165,7 +1166,7 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, if payloadLen > s.opts.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) } - err = t.Write(stream, hdr, payload, opts) + err = stream.Write(hdr, payload, opts) if err == nil { if len(s.opts.statsHandlers) != 0 { for _, sh := range s.opts.statsHandlers { @@ -1212,7 +1213,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1320,7 +1321,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor decomp = encoding.GetCompressor(rc) if decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(stream, st) + stream.WriteStatus(st) return st.Err() } } @@ -1354,15 +1355,12 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { - if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + if e := stream.WriteStatus(status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } defer d.Free() - if channelz.IsOn() { - t.IncrMsgRecv() - } df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) @@ -1404,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } - if e := t.WriteStatus(stream, appStatus); e != nil { + if e := stream.WriteStatus(appStatus); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { @@ -1431,20 +1429,20 @@ func (s *Server) processUnaryRPC(ctx 
context.Context, t transport.ServerTranspor if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } - opts := &transport.Options{Last: true} + opts := &transport.WriteOptions{Last: true} // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err } if sts, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, sts); e != nil { + if e := stream.WriteStatus(sts); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { @@ -1484,9 +1482,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, sm) } } - if channelz.IsOn() { - t.IncrMsgSent() - } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -1502,7 +1497,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, st) } } - return t.WriteStatus(stream, statusOK) + return stream.WriteStatus(statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1541,7 +1536,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1561,7 +1556,6 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, - t: t, s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), @@ -1648,7 +1642,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ss.decomp = encoding.GetCompressor(rc) if ss.decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) return st.Err() } } @@ -1717,7 +1711,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - t.WriteStatus(ss.s, appStatus) + ss.s.WriteStatus(appStatus) // TODO: Should we log an error from WriteStatus here and below? 
return appErr } @@ -1735,10 +1729,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - return t.WriteStatus(ss.s, statusOK) + return ss.s.WriteStatus(statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -1768,7 +1762,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1783,17 +1777,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) + // FromIncomingContext is expensive: skip if there are no statsHandlers + if len(s.opts.statsHandlers) > 0 { + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. @@ -1802,17 +1799,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(ctx, t, stream, srv, md, ti) + s.processUnaryRPC(ctx, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + s.processStreamingRPC(ctx, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1825,7 +1822,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyPrintf("%s", errDesc) ti.tr.SetError() } - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -2100,7 +2097,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2122,7 +2119,7 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/src/vendor/google.golang.org/grpc/service_config.go b/src/vendor/google.golang.org/grpc/service_config.go index 2671c5ef6..7e83027d1 100644 --- a/src/vendor/google.golang.org/grpc/service_config.go +++ b/src/vendor/google.golang.org/grpc/service_config.go @@ -168,6 +168,7 @@ func init() { return parseServiceConfig(js, defaultMaxCallAttempts) } } + func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} @@ -297,7 +298,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalservi return rp, nil } -func min(a, b *int) *int { +func minPointers(a, b *int) *int { if *a < *b { return a } @@ -309,7 +310,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { return &defaultVal } if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) + return minPointers(mcMax, doptMax) } if mcMax != nil { return mcMax diff --git a/src/vendor/google.golang.org/grpc/stats/metrics.go b/src/vendor/google.golang.org/grpc/stats/metrics.go new file mode 100644 index 000000000..641c8e979 --- /dev/null +++ b/src/vendor/google.golang.org/grpc/stats/metrics.go @@ -0,0 +1,81 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import "maps" + +// MetricSet is a set of metrics to record. Once created, MetricSet is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetricSet instead. +type MetricSet struct { + // metrics are the set of metrics to initialize. + metrics map[string]bool +} + +// NewMetricSet returns a MetricSet containing metricNames. +func NewMetricSet(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. 
+func (m *MetricSet) Metrics() map[string]bool { + return m.metrics +} + +// Add adds the metricNames to the metrics set and returns a new copy with the +// additional metrics. +func (m *MetricSet) Add(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *MetricSet) Join(metrics *MetricSet) *MetricSet { + newMetrics := make(map[string]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &MetricSet{metrics: newMetrics} +} + +// Remove removes the metricNames from the metrics set and returns a new copy +// with the metrics removed. +func (m *MetricSet) Remove(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + delete(newMetrics, metric) + } + return &MetricSet{metrics: newMetrics} +} diff --git a/src/vendor/google.golang.org/grpc/stats/stats.go b/src/vendor/google.golang.org/grpc/stats/stats.go index 71195c494..6f20d2d54 100644 --- a/src/vendor/google.golang.org/grpc/stats/stats.go +++ b/src/vendor/google.golang.org/grpc/stats/stats.go @@ -260,84 +260,42 @@ func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - // SetTags attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-tags-bin` header in the metadata instead. func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b)) } // Tags returns the tags from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-tags-bin` header from metadata instead. func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. -func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) -} - -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. 
-func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - // SetTrace attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-trace-bin` header in the metadata instead. func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b)) } // Trace returns the trace from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-trace-bin` header from metadata instead. func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. -func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. -func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } diff --git a/src/vendor/google.golang.org/grpc/stream.go b/src/vendor/google.golang.org/grpc/stream.go index bb2b2a216..17e2267b3 100644 --- a/src/vendor/google.golang.org/grpc/stream.go +++ b/src/vendor/google.golang.org/grpc/stream.go @@ -23,7 +23,7 @@ import ( "errors" "io" "math" - "math/rand" + rand "math/rand/v2" "strconv" "sync" "time" @@ -113,7 +113,9 @@ type ClientStream interface { // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. + // the stream may be discovered using RecvMsg. For unary or server-streaming + // RPCs (StreamDesc.ClientStreams is false), a nil error is returned + // unconditionally. 
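
Illustrative aside, not part of the vendored changes: per the stats.go hunk above, SetTags and SetTrace are now deprecated shims that append to outgoing metadata, so the two paths below attach the same grpc-tags-bin header. A sketch only; the tag payload is arbitrary.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/stats"
)

func main() {
	tags := []byte("route=primary") // arbitrary example payload

	viaStats := stats.SetTags(context.Background(), tags) // deprecated shim over metadata
	viaMD := metadata.AppendToOutgoingContext(context.Background(), "grpc-tags-bin", string(tags))

	mdA, _ := metadata.FromOutgoingContext(viaStats)
	mdB, _ := metadata.FromOutgoingContext(viaMD)
	fmt.Println(mdA.Get("grpc-tags-bin"), mdB.Get("grpc-tags-bin")) // identical header values
}
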
// // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or @@ -216,7 +218,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() - var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) } @@ -584,7 +586,7 @@ type csAttempt struct { ctx context.Context cs *clientStream t transport.ClientTransport - s *transport.Stream + s *transport.ClientStream p *parser pickResult balancer.PickResult @@ -706,11 +708,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { cs.numRetriesSincePushback = 0 } else { fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.InitialBackoff) * fact - if max := float64(rp.MaxBackoff); cur > max { - cur = max - } - dur = time.Duration(rand.Int63n(int64(cur))) + cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff)) + // Apply jitter by multiplying with a random factor between 0.8 and 1.2 + cur *= 0.8 + 0.4*rand.Float64() + dur = time.Duration(int64(cur)) cs.numRetriesSincePushback++ } @@ -991,7 +992,7 @@ func (cs *clientStream) CloseSend() error { } cs.sentLast = true op := func(a *csAttempt) error { - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + a.s.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1083,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } a.mu.Unlock() } - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1097,9 +1098,6 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } } - if channelz.IsOn() { - a.t.IncrMsgSent() - } return nil } @@ -1153,9 +1151,6 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { Length: payInfo.uncompressedBytes.Len(), }) } - if channelz.IsOn() { - a.t.IncrMsgRecv() - } if cs.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. 
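
Illustrative aside, not part of the vendored changes: the shouldRetry hunk above replaces the old rand.Int63n jitter with a capped exponential delay scaled by a random factor in [0.8, 1.2). A standalone re-derivation of that arithmetic with made-up retry-policy values:

package main

import (
	"fmt"
	"math"
	rand "math/rand/v2"
	"time"
)

// jitteredBackoff mirrors the computation in csAttempt.shouldRetry above:
// cap initial*multiplier^retries at maxBackoff, then scale by 0.8 + 0.4*rand.Float64().
func jitteredBackoff(initial, maxBackoff time.Duration, multiplier float64, retriesSincePushback int) time.Duration {
	cur := min(float64(initial)*math.Pow(multiplier, float64(retriesSincePushback)), float64(maxBackoff))
	cur *= 0.8 + 0.4*rand.Float64()
	return time.Duration(int64(cur))
}

func main() {
	// 100ms initial, 1s cap, multiplier 2, third retry: 800ms scaled into [640ms, 960ms).
	fmt.Println(jitteredBackoff(100*time.Millisecond, time.Second, 2, 3))
}
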
return nil @@ -1183,7 +1178,7 @@ func (a *csAttempt) finish(err error) { } var tr metadata.MD if a.s != nil { - a.t.CloseStream(a.s, err) + a.s.Close(err) tr = a.s.Trailer() } @@ -1340,7 +1335,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { - s *transport.Stream + s *transport.ClientStream ac *addrConn callHdr *transport.CallHdr cancel context.CancelFunc @@ -1380,7 +1375,7 @@ func (as *addrConnStream) CloseSend() error { } as.sentLast = true - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + as.s.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1430,7 +1425,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1440,9 +1435,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return io.EOF } - if channelz.IsOn() { - as.t.IncrMsgSent() - } return nil } @@ -1480,9 +1472,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { return toRPCErr(err) } - if channelz.IsOn() { - as.t.IncrMsgRecv() - } if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1510,7 +1499,7 @@ func (as *addrConnStream) finish(err error) { err = nil } if as.s != nil { - as.t.CloseStream(as.s, err) + as.s.Close(err) } if err != nil { @@ -1577,8 +1566,7 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { ctx context.Context - t transport.ServerTransport - s *transport.Stream + s *transport.ServerStream p *parser codec baseCodec @@ -1628,7 +1616,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { return status.Error(codes.Internal, err.Error()) } - err = ss.t.WriteHeader(ss.s, md) + err = ss.s.SendHeader(md) if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() sh := &binarylog.ServerHeader{ @@ -1668,7 +1656,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1676,9 +1664,6 @@ func (ss *serverStream) SendMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgSent() - } }() // Server handler could have set new compressor by calling SetSendCompressor. @@ -1710,7 +1695,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { if payloadLen > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) } - if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil { return toRPCErr(err) } @@ -1756,7 +1741,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1764,9 +1749,6 @@ func (ss *serverStream) RecvMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgRecv() - } }() var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { diff --git a/src/vendor/google.golang.org/grpc/version.go b/src/vendor/google.golang.org/grpc/version.go index d690c2b27..6cbe116f2 100644 --- a/src/vendor/google.golang.org/grpc/version.go +++ b/src/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.68.0-dev" +const Version = "1.69.2" diff --git a/src/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/src/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 2c0693d7a..5a57ef6f3 100644 Binary files a/src/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/src/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/src/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/src/vendor/google.golang.org/protobuf/internal/errors/is_go112.go deleted file mode 100644 index fbcd34920..000000000 --- a/src/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package errors - -import "reflect" - -// Is is a copy of Go 1.13's errors.Is for use with older Go versions. -func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - if err = unwrap(err); err == nil { - return false - } - } -} - -func unwrap(err error) error { - u, ok := err.(interface { - Unwrap() error - }) - if !ok { - return nil - } - return u.Unwrap() -} diff --git a/src/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/src/vendor/google.golang.org/protobuf/internal/errors/is_go113.go deleted file mode 100644 index 5e72f1cde..000000000 --- a/src/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package errors - -import "errors" - -// Is is errors.Is. 
-func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/src/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/src/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index f32529856..378b826fa 100644 --- a/src/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/src/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -117,6 +117,9 @@ type ( // GenerateLegacyUnmarshalJSON determines if the plugin generates the // UnmarshalJSON([]byte) error method for enums. GenerateLegacyUnmarshalJSON bool + // APILevel controls which API (Open, Hybrid or Opaque) should be used + // for generated code (.pb.go files). + APILevel int } ) diff --git a/src/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/src/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 7611796e8..10132c9b3 100644 --- a/src/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/src/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -32,6 +32,10 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeVarint(b) b = b[m:] parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + case genid.GoFeatures_ApiLevel_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.APILevel = int(v) case genid.GoFeatures_StripEnumPrefix_field_number: v, m := protowire.ConsumeVarint(b) b = b[m:] diff --git a/src/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/src/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 09792d96f..f5ee7f5c2 100644 --- a/src/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/src/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -21,18 +21,35 @@ const ( // Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_name protoreflect.Name = "api_level" GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix" GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_fullname protoreflect.FullName = "pb.GoFeatures.api_level" GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix" ) // Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 + GoFeatures_ApiLevel_field_number protoreflect.FieldNumber = 2 GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3 ) +// Full and short names for pb.GoFeatures.APILevel. +const ( + GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel" + GoFeatures_APILevel_enum_name = "APILevel" +) + +// Enum values for pb.GoFeatures.APILevel. +const ( + GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0 + GoFeatures_API_OPEN_enum_value = 1 + GoFeatures_API_HYBRID_enum_value = 2 + GoFeatures_API_OPAQUE_enum_value = 3 +) + // Full and short names for pb.GoFeatures.StripEnumPrefix. 
const ( GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix" diff --git a/src/vendor/golang.org/x/tools/internal/versions/constraint_go121.go b/src/vendor/google.golang.org/protobuf/internal/genid/name.go similarity index 50% rename from src/vendor/golang.org/x/tools/internal/versions/constraint_go121.go rename to src/vendor/google.golang.org/protobuf/internal/genid/name.go index 38011407d..224f33930 100644 --- a/src/vendor/golang.org/x/tools/internal/versions/constraint_go121.go +++ b/src/vendor/google.golang.org/protobuf/internal/genid/name.go @@ -2,13 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.21 -// +build go1.21 +package genid -package versions +const ( + NoUnkeyedLiteral_goname = "noUnkeyedLiteral" + NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral" -import "go/build/constraint" - -func init() { - ConstraintGoVersion = constraint.GoVersion -} + BuilderSuffix_goname = "_builder" +) diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/src/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go new file mode 100644 index 000000000..6075d6f69 --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go @@ -0,0 +1,128 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "strconv" + "sync/atomic" + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (Export) UnmarshalField(msg any, fieldNum int32) { + UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum)) +} + +// Present checks the presence set for a certain field number (zero +// based, ordered by appearance in original proto file). part is +// a pointer to the correct element in the bitmask array, num is the +// field number unaltered. Example (field number 70 -> part = +// &m.XXX_presence[1], num = 70) +func (Export) Present(part *uint32, num uint32) bool { + // This hook will read an unprotected shadow presence set if + // we're unning under the race detector + raceDetectHookPresent(part, num) + return atomic.LoadUint32(part)&(1<<(num%32)) > 0 +} + +// SetPresent adds a field to the presence set. part is a pointer to +// the relevant element in the array and num is the field number +// unaltered. size is the number of fields in the protocol +// buffer. +func (Export) SetPresent(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) { + return + } + } +} + +// SetPresentNonAtomic is like SetPresent, but operates non-atomically. +// It is meant for use by builder methods, where the message is known not +// to be accessible yet by other goroutines. +func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + *part |= 1 << (num % 32) +} + +// ClearPresence removes a field from the presence set. part is a +// pointer to the relevant element in the presence array and num is +// the field number unaltered. 
+func (Export) ClearPresent(part *uint32, num uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookClearPresent(part, num) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) { + return + } + } +} + +// interfaceToPointer takes a pointer to an empty interface whose value is a +// pointer type, and converts it into a "pointer" that points to the same +// target +func interfaceToPointer(i *any) pointer { + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +func (p pointer) atomicGetPointer() pointer { + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +func (p pointer) atomicSetPointer(q pointer) { + atomic.StorePointer((*unsafe.Pointer)(p.p), q.p) +} + +// AtomicCheckPointerIsNil takes an interface (which is a pointer to a +// pointer) and returns true if the pointed-to pointer is nil (using an +// atomic load). This function is inlineable and, on x86, just becomes a +// simple load and compare. +func (Export) AtomicCheckPointerIsNil(ptr any) bool { + return interfaceToPointer(&ptr).atomicGetPointer().IsNil() +} + +// AtomicSetPointer takes two interfaces (first is a pointer to a pointer, +// second is a pointer) and atomically sets the second pointer into location +// referenced by first pointer. Unfortunately, atomicSetPointer() does not inline +// (even on x86), so this does not become a simple store on x86. +func (Export) AtomicSetPointer(dstPtr, valPtr any) { + interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr)) +} + +// AtomicLoadPointer loads the pointer at the location pointed at by src, +// and stores that pointer value into the location pointed at by dst. +func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) +} + +// AtomicInitializePointer makes ptr and dst point to the same value. +// +// If *ptr is a nil pointer, it sets *ptr = *dst. +// +// If *ptr is a non-nil pointer, it sets *dst = *ptr. +func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) { + if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) + } +} + +// MessageFieldStringOf returns the field formatted as a string, +// either as the field name if resolvable otherwise as a decimal string. +func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string { + fd := md.Fields().ByNumber(n) + if fd != nil { + return string(fd.Name()) + } + return strconv.Itoa(int(n)) +} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/src/vendor/google.golang.org/protobuf/internal/impl/bitmap.go new file mode 100644 index 000000000..ea276547c --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/impl/bitmap.go @@ -0,0 +1,34 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package impl + +// There is no additional data as we're not running under race detector. +type RaceDetectHookData struct{} + +// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away. 
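
Illustrative aside, not part of the vendored changes: the Present/SetPresent/ClearPresent helpers above all manipulate one bit per field, (1 << (num % 32)), inside a uint32 presence word, using atomic loads and compare-and-swap. A dependency-free sketch of that bit arithmetic; the word lookup (num / 32) is an assumption for this sketch, since in the generated code the caller passes a pointer to the correct word directly, and the field indices used here are arbitrary.

package main

import (
	"fmt"
	"sync/atomic"
)

// setPresent sets the presence bit for field index num using the same CAS loop shape as above.
func setPresent(words []uint32, num uint32) {
	part := &words[num/32]
	for {
		old := atomic.LoadUint32(part)
		if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
			return
		}
	}
}

// clearPresent clears the presence bit for field index num.
func clearPresent(words []uint32, num uint32) {
	part := &words[num/32]
	for {
		old := atomic.LoadUint32(part)
		if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) {
			return
		}
	}
}

// present reports whether the presence bit for field index num is set.
func present(words []uint32, num uint32) bool {
	return atomic.LoadUint32(&words[num/32])&(1<<(num%32)) > 0
}

func main() {
	words := make([]uint32, 2) // room for 64 field indices
	setPresent(words, 33)
	fmt.Println(present(words, 33), present(words, 34)) // true false
	clearPresent(words, 33)
	fmt.Println(present(words, 33)) // false
}
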
+func (presence) raceDetectHookPresent(num uint32) {} +func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {} +func (presence) raceDetectHookClearPresent(num uint32) {} +func (presence) raceDetectHookAllocAndCopy(src presence) {} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) {} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookClearPresent(field *uint32, num uint32) {} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/src/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go new file mode 100644 index 000000000..e9a27583a --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go @@ -0,0 +1,126 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package impl + +// When running under race detector, we add a presence map of bytes, that we can access +// in the hook functions so that we trigger the race detection whenever we have concurrent +// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent +// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations. +type RaceDetectHookData struct { + shadowPresence *[]byte +} + +// Hooks for presence bitmap operations that allocate, read and write the shadowPresence +// using non-atomic operations. +func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) { + sp := make([]byte, size) + atomicStoreShadowPresence(&data.shadowPresence, &sp) +} + +func (p presence) raceDetectHookPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +func (p presence) raceDetectHookClearPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + + } +} + +// raceDetectHookAllocAndCopy allocates a new shadowPresence slice at lazy and copies +// shadowPresence bytes from src to lazy. 
+func (p presence) raceDetectHookAllocAndCopy(q presence) { + sData := q.toRaceDetectData() + dData := p.toRaceDetectData() + if sData == nil { + return + } + srcSp := atomicLoadShadowPresence(&sData.shadowPresence) + if srcSp == nil { + atomicStoreShadowPresence(&dData.shadowPresence, nil) + return + } + n := len(*srcSp) + dSlice := make([]byte, n) + atomicStoreShadowPresence(&dData.shadowPresence, &dSlice) + for i := 0; i < n; i++ { + dSlice[i] = (*srcSp)[i] + } +} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookClearPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + } +} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/src/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index f29e6a8fa..fe2c719ce 100644 --- a/src/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/src/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { } return nil } + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + if mi.extensionOffset.IsValid() { e := p.Apply(mi.extensionOffset).Extensions() if err := mi.isInitExtensions(e); err != nil { @@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { if !f.isRequired && f.funcs.isInit == nil { continue } + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit != nil { + f.mi.init() + if f.mi.needsInitCheck { + if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() { + lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr() + if !lazy.AllowedPartial() { + // Nothing to see here, it was checked on unmarshal + continue + } + mi.lazyUnmarshal(p, f.num) + } + if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil { + return err + } + } + } + continue + } + fptr := p.Apply(f.offset) if f.isPointer && fptr.Elem().IsNil() { if f.isRequired { diff --git 
a/src/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/src/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go new file mode 100644 index 000000000..76818ea25 --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go @@ -0,0 +1,264 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + mi := getMessageInfo(ft) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessage, + marshal: appendOpaqueMessage, + unmarshal: consumeOpaqueMessage, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroup, + marshal: appendOpaqueGroup, + unmarshal: consumeOpaqueGroup, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize +} + +func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + mp := p.AtomicGetPointer() + calculatedSize := f.mi.sizePointer(mp, opts) + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, mp, opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err +} + +func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := p.AtomicGetPointer() + if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error { + mp := p.AtomicGetPointer() + if mp.IsNil() { + return nil + } + return f.mi.checkInitializedPointer(mp) +} + +func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstmp := dst.AtomicGetPointer() + if dstmp.IsNil() { + dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts) +} + +func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts) +} + +func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := 
f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts) + b = protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := p.AtomicGetPointer() + if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, e := f.mi.unmarshalPointer(b, mp, f.num, opts) + return o, e +} + +func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft)) + } + mt := ft.Elem().Elem() // *[]*T -> *T + mi := getMessageInfo(mt) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessageSlice, + marshal: appendOpaqueMessageSlice, + unmarshal: consumeOpaqueMessageSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroupSlice, + marshal: appendOpaqueGroupSlice, + unmarshal: consumeOpaqueGroupSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } + } + return b, nil +} + +func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error { + sp := p.AtomicGetPointer() + if sp.IsNil() { + return nil + } + s := sp.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + ds := dst.AtomicGetPointer() + if ds.IsNil() { + ds = 
dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + for _, sp := range src.AtomicGetPointer().PointerSlice() { + dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + f.mi.mergePointer(dm, sp, opts) + ds.AppendPointerSlice(dm) + } +} + +func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + out, err = f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + return out, err +} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/src/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 78be9df34..2f7b363ec 100644 --- a/src/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/src/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -32,6 +32,10 @@ type coderMessageInfo struct { needsInitCheck bool isMessageSet bool numRequiredFields uint8 + + lazyOffset offset + presenceOffset offset + presenceSize presenceSize } type coderFieldInfo struct { @@ -45,12 +49,19 @@ type coderFieldInfo struct { tagsize int // size of the varint-encoded tag isPointer bool // true if IsNil may be called on the struct field isRequired bool // true if field is required + + isLazy bool + presenceIndex uint32 } +const noPresence = 0xffffffff + func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.sizecacheOffset = invalidOffset mi.unknownOffset = invalidOffset mi.extensionOffset = invalidOffset + mi.lazyOffset = invalidOffset + mi.presenceOffset = si.presenceOffset if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { mi.sizecacheOffset = si.sizecacheOffset @@ -127,6 +138,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { validation: newFieldValidationInfo(mi, si, fd, ft), isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), isRequired: fd.Cardinality() == protoreflect.Required, + + presenceIndex: noPresence, } mi.orderedCoderFields = append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/src/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go new file mode 100644 index 000000000..88c16ae5b --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -0,0 +1,156 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) { + mi.sizecacheOffset = si.sizecacheOffset + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + mi.extensionOffset = si.extensionOffset + mi.lazyOffset = si.lazyOffset + mi.presenceOffset = si.presenceOffset + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + fieldOffset = offsetOf(fs, mi.Exporter) + case fd.IsWeak(): + fieldOffset = si.weakOffset + funcs = makeWeakMessageFieldCoder(fd) + case fd.Message() != nil && !fd.IsMap(): + fieldOffset = offsetOf(fs, mi.Exporter) + if fd.IsList() { + childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft) + } else { + childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft) + } + default: + fieldOffset = offsetOf(fs, mi.Exporter) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si.structInfo, fd, ft), + isPointer: (fd.Cardinality() == protoreflect.Repeated || + fd.Kind() == protoreflect.MessageKind || + fd.Kind() == protoreflect.GroupKind), + isRequired: fd.Cardinality() == protoreflect.Required, + presenceIndex: noPresence, + } + + // TODO: Use presence for all fields. + // + // In some cases, such as maps, presence means only "might be set" rather + // than "is definitely set", but every field should have a presence bit to + // permit us to skip over definitely-unset fields at marshal time. 
+ + var hasPresence bool + hasPresence, cf.isLazy = usePresenceForField(si, fd) + + if hasPresence { + cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd) + } + + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si.structInfo) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num + }) + + var maxDense protoreflect.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) > len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return order.LegacyFieldOrder(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } +} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/decode.go b/src/vendor/google.golang.org/protobuf/internal/impl/decode.go index cda0520c2..e0dd21fa5 100644 --- a/src/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/src/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions { AllowPartial: true, DiscardUnknown: o.DiscardUnknown(), Resolver: o.resolver, + + NoLazyDecoding: o.NoLazyDecoding(), } } @@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&protoiface.UnmarshalDiscardUnknown != 0 } -func (o unmarshalOptions) IsDefault() bool { - return o.flags == 0 && o.resolver == protoregistry.GlobalTypes +func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 } +func (o unmarshalOptions) Validated() bool { return o.flags&protoiface.UnmarshalValidated != 0 } +func (o unmarshalOptions) NoLazyDecoding() bool { + return o.flags&protoiface.UnmarshalNoLazyDecoding != 0 +} + +func (o unmarshalOptions) CanBeLazy() bool { + if o.resolver != protoregistry.GlobalTypes { + return false + } + // We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set + return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) 
== 0 } var lazyUnmarshalOptions = unmarshalOptions{ resolver: protoregistry.GlobalTypes, - depth: protowire.DefaultRecursionLimit, + + flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated, + + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } + + lazyDecoding := LazyEnabled() // default + if opts.NoLazyDecoding() { + lazyDecoding = false // explicitly disabled + } + if mi.lazyOffset.IsValid() && lazyDecoding { + return mi.unmarshalPointerLazy(b, p, groupTag, opts) + } + return mi.unmarshalPointerEager(b, p, groupTag, opts) +} + +// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy. +// The corresponding function for Lazy is in google_lazy.go. +func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + initialized := true var requiredMask uint64 var exts *map[int32]ExtensionField + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + start := len(b) for len(b) > 0 { // Parse the tag (field number and wire type). @@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if f.funcs.isInit != nil && !o.initialized { initialized = false } + + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: // Possible extension. if exts == nil && mi.extensionOffset.IsValid() { @@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p return out, errUnknown } if flags.LazyUnmarshalExtensions { - if opts.IsDefault() && x.canLazy(xt) { + if opts.CanBeLazy() && x.canLazy(xt) { out, valid := skipExtension(b, xi, num, wtyp, opts) switch valid { case ValidationValid: @@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp if n < 0 { return out, ValidationUnknown } + + if opts.Validated() { + out.initialized = true + out.n = n + return out, ValidationValid + } + out, st := xi.validation.mi.validate(v, 0, opts) out.n = n return out, st diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/encode.go b/src/vendor/google.golang.org/protobuf/internal/impl/encode.go index 6254f5de4..b2e212291 100644 --- a/src/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/src/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,6 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/protolazy" "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -71,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int e := p.Apply(mi.extensionOffset).Extensions() size += mi.sizeExtensions(e, opts) } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.size == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + + if f.isLazy && 
fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + size += (*lazy).SizeField(uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + size += f.funcs.size(fptr, f, opts) + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -134,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, err } } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.marshal == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + if f.isLazy { + // Be careful, this field needs to be read atomically, like for a get + if f.isPointer && fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + b, _ = (*lazy).AppendField(b, uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + continue + } else if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -163,6 +233,14 @@ func fullyLazyExtensions(opts marshalOptions) bool { return opts.flags&piface.MarshalDeterministic == 0 } +// lazyFields returns true if we should attempt to keep fields lazy over size and marshal. +func lazyFields(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // fields to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/src/vendor/google.golang.org/protobuf/internal/impl/lazy.go new file mode 100644 index 000000000..e8fb6c35b --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/impl/lazy.go @@ -0,0 +1,433 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math/bits" + "os" + "reflect" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/protolazy" + "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +var enableLazy int32 = func() int32 { + if os.Getenv("GOPROTODEBUG") == "nolazy" { + return 0 + } + return 1 +}() + +// EnableLazyUnmarshal enables lazy unmarshaling. +func EnableLazyUnmarshal(enable bool) { + if enable { + atomic.StoreInt32(&enableLazy, 1) + return + } + atomic.StoreInt32(&enableLazy, 0) +} + +// LazyEnabled reports whether lazy unmarshalling is currently enabled. +func LazyEnabled() bool { + return atomic.LoadInt32(&enableLazy) != 0 +} + +// UnmarshalField unmarshals a field in a message. 
+func UnmarshalField(m interface{}, num protowire.Number) { + switch m := m.(type) { + case *messageState: + m.messageInfo().lazyUnmarshal(m.pointer(), num) + case *messageReflectWrapper: + m.messageInfo().lazyUnmarshal(m.pointer(), num) + default: + panic(fmt.Sprintf("unsupported wrapper type %T", m)) + } +} + +func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) { + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + if f == nil { + panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num)) + } + lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr() + start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num)) + if !found && multipleEntries == nil { + panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num)) + } + // The actual pointer in the message can not be set until the whole struct is filled in, otherwise we will have races. + // Create another pointer and set it atomically, if we won the race and the pointer in the original message is still nil. + fp := pointerOfValue(reflect.New(f.ft)) + if multipleEntries != nil { + for _, entry := range multipleEntries { + mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags()) + } + } else { + mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags()) + } + p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem()) +} + +func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error { + opts := lazyUnmarshalOptions + opts.flags |= flags + for len(b) > 0 { + // Parse the tag (field number and wire type). 
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return errors.New("invalid wire data") + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return errors.New("invalid wire data") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + if num == f.num { + o, err := f.funcs.unmarshal(b, p, wtyp, f, opts) + if err == nil { + b = b[o.n:] + continue + } + if err != errUnknown { + return err + } + } + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return errors.New("invalid wire data") + } + b = b[n:] + } + return nil +} + +func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { + fmi := f.validation.mi + if fmi == nil { + fd := mi.Desc.Fields().ByNumber(f.num) + if fd == nil || !fd.IsWeak() { + return out, ValidationUnknown + } + messageName := fd.Message().FullName() + messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + if err != nil { + return out, ValidationUnknown + } + var ok bool + fmi, ok = messageType.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + } + fmi.init() + switch f.validation.typ { + case validationTypeMessage: + if wtyp != protowire.BytesType { + return out, ValidationWrongWireType + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, ValidationInvalid + } + out, st := fmi.validate(v, 0, opts) + out.n = n + return out, st + case validationTypeGroup: + if wtyp != protowire.StartGroupType { + return out, ValidationWrongWireType + } + out, st := fmi.validate(b, f.num, opts) + return out, st + default: + return out, ValidationUnknown + } +} + +// unmarshalPointerLazy is similar to unmarshalPointerEager, but it +// specifically handles lazy unmarshalling. it expects lazyOffset and +// presenceOffset to both be valid. +func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + initialized := true + var requiredMask uint64 + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + var lazyIndex []protolazy.IndexEntry + var lastNum protowire.Number + outOfOrder := false + lazyDecode := false + presence = p.Apply(mi.presenceOffset).PresenceInfo() + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + if !presence.AnyPresent(mi.presenceSize) { + if opts.CanBeLazy() { + // If the message contains existing data, we need to merge into it. + // Lazy unmarshaling doesn't merge, so only enable it when the + // message is empty (has no presence bitmap). + lazyDecode = true + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + (*lazy).SetUnmarshalFlags(opts.flags) + if !opts.AliasBuffer() { + // Make a copy of the buffer for lazy unmarshaling. + // Set the AliasBuffer flag so recursive unmarshal + // operations reuse the copy. + b = append([]byte{}, b...) + opts.flags |= piface.UnmarshalAliasBuffer + } + (*lazy).SetBuffer(b) + } + } + // Track special handling of lazy fields. + // + // In the common case, all fields are lazyValidateOnly (and lazyFields remains nil). + // In the event that validation for a field fails, this map tracks handling of the field. 
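
Illustrative aside, not part of the vendored changes: the field-scanning loop in unmarshalField above first tries a one- or two-byte fast path for the varint tag before falling back to protowire.ConsumeVarint; either way the tag packs the field number in the upper bits and the wire type in the low three bits (tag>>3 and tag&7). A small check of that encoding using the public protowire package, with an arbitrary field number:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode the tag for (hypothetical) field 5 with the bytes wire type, then decode it both ways.
	b := protowire.AppendTag(nil, 5, protowire.BytesType)

	tag, n := protowire.ConsumeVarint(b) // generic path taken after the fast path fails
	fmt.Println(n, protowire.Number(tag>>3), protowire.Type(tag&7)) // 1 5 2

	num, typ, _ := protowire.ConsumeTag(b) // equivalent public helper
	fmt.Println(num, typ)                  // 5 2
}
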
+ type lazyAction uint8 + const ( + lazyValidateOnly lazyAction = iota // validate the field only + lazyUnmarshalNow // eagerly unmarshal the field + lazyUnmarshalLater // unmarshal the field after the message is fully processed + ) + var lazyFields map[*coderFieldInfo]lazyAction + var exts *map[int32]ExtensionField + start := len(b) + pos := 0 + for len(b) > 0 { + // Parse the tag (field number and wire type). + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errors.New("invalid field number") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errors.New("mismatching end group marker") + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + discardUnknown := false + Field: + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + if f.isLazy && lazyDecode { + switch { + case lazyFields == nil || lazyFields[f] == lazyValidateOnly: + // Attempt to validate this field and leave it for later lazy unmarshaling. + o, valid := mi.skipField(b, f, wtyp, opts) + switch valid { + case ValidationValid: + // Skip over the valid field and continue. + err = nil + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + requiredMask |= f.validation.requiredBit + if !o.initialized { + initialized = false + } + n = o.n + break Field + case ValidationInvalid: + return out, errors.New("invalid proto wire format") + case ValidationWrongWireType: + break Field + case ValidationUnknown: + if lazyFields == nil { + lazyFields = make(map[*coderFieldInfo]lazyAction) + } + if presence.Present(f.presenceIndex) { + // We were unable to determine if the field is valid or not, + // and we've already skipped over at least one instance of this + // field. Clear the presence bit (so if we stop decoding early, + // we don't leave a partially-initialized field around) and flag + // the field for unmarshaling before we return. + presence.ClearPresent(f.presenceIndex) + lazyFields[f] = lazyUnmarshalLater + discardUnknown = true + break Field + } else { + // We were unable to determine if the field is valid or not, + // but this is the first time we've seen it. Flag it as needing + // eager unmarshaling and fall through to the eager unmarshal case below. + lazyFields[f] = lazyUnmarshalNow + } + } + case lazyFields[f] == lazyUnmarshalLater: + // This field will be unmarshaled in a separate pass below. + // Skip over it here. + discardUnknown = true + break Field + default: + // Eagerly unmarshal the field. 
+ } + } + if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) { + if p.Apply(f.offset).AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(p, f.num) + } + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) + } + } + b = b[n:] + end := start - len(b) + if lazyDecode && f != nil && f.isLazy { + if num != lastNum { + lazyIndex = append(lazyIndex, protolazy.IndexEntry{ + FieldNum: uint32(num), + Start: uint32(pos), + End: uint32(end), + }) + } else { + i := len(lazyIndex) - 1 + lazyIndex[i].End = uint32(end) + lazyIndex[i].MultipleContiguous = true + } + } + if num < lastNum { + outOfOrder = true + } + pos = end + lastNum = num + } + if groupTag != 0 { + return out, errors.New("missing end group marker") + } + if lazyFields != nil { + // Some fields failed validation, and now need to be unmarshaled. 
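Editor's note: requiredMask above assigns each required field its own bit; after the loop (a few lines below) the message only counts as initialized when the number of set bits equals numRequiredFields. A minimal sketch of that accounting with math/bits, using made-up field indices:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        // Hypothetical message with three required fields, one bit each.
        const numRequired = 3
        var requiredMask uint64

        seen := []int{0, 2} // required-field bits observed while decoding
        for _, i := range seen {
            requiredMask |= 1 << i
        }

        // Same check as in the decode loop: initialized only if every
        // required bit was set.
        initialized := bits.OnesCount64(requiredMask) == numRequired
        fmt.Println(initialized) // false: bit 1 is missing
    }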
+ for f, action := range lazyFields { + if action != lazyUnmarshalLater { + continue + } + initialized = false + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil { + return out, err + } + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + } + if lazyDecode { + if outOfOrder { + sort.Slice(lazyIndex, func(i, j int) bool { + return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum || + (lazyIndex[i].FieldNum == lazyIndex[j].FieldNum && + lazyIndex[i].Start < lazyIndex[j].Start) + }) + } + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + + (*lazy).SetIndex(lazyIndex) + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/merge.go b/src/vendor/google.golang.org/protobuf/internal/impl/merge.go index 7e65f64f2..8ffdce67d 100644 --- a/src/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/src/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { if src.IsNil() { return } + + var presenceSrc presence + var presenceDst presence + if mi.presenceOffset.IsValid() { + presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo() + presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo() + } + for _, f := range mi.orderedCoderFields { if f.funcs.merge == nil { continue } sfptr := src.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presenceSrc.Present(f.presenceIndex) { + continue + } + dfptr := dst.Apply(f.offset) + if f.isLazy { + if sfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(src, f.num) + } + if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(dst, f.num) + } + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + continue + } + if f.isPointer && sfptr.Elem().IsNil() { continue } diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/message.go b/src/vendor/google.golang.org/protobuf/internal/impl/message.go index 741b5ed29..fa10a0f5c 100644 --- a/src/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/src/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -79,6 +79,9 @@ func (mi *MessageInfo) initOnce() { if mi.initDone == 1 { return } + if opaqueInitHook(mi) { + return + } t := mi.GoReflectType if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { @@ -133,6 +136,9 @@ type structInfo struct { extensionOffset offset extensionType reflect.Type + lazyOffset offset + presenceOffset offset + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField oneofsByName map[protoreflect.Name]reflect.StructField oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber @@ -145,6 +151,8 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { weakOffset: invalidOffset, unknownOffset: invalidOffset, extensionOffset: invalidOffset, + lazyOffset: invalidOffset, + presenceOffset: invalidOffset, fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, oneofsByName: map[protoreflect.Name]reflect.StructField{}, @@ -175,6 +183,10 @@ fieldLoop: si.extensionOffset = offsetOf(f, mi.Exporter) si.extensionType = f.Type } + case 
"lazyFields", "XXX_lazyUnmarshalInfo": + si.lazyOffset = offsetOf(f, mi.Exporter) + case "XXX_presence": + si.presenceOffset = offsetOf(f, mi.Exporter) default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/src/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go new file mode 100644 index 000000000..d407dd791 --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -0,0 +1,614 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "reflect" + "strings" + "sync/atomic" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type opaqueStructInfo struct { + structInfo +} + +// isOpaque determines whether a protobuf message type is on the Opaque API. It +// checks whether the type is a Go struct that protoc-gen-go would generate. +// +// This function only detects newly generated messages from the v2 +// implementation of protoc-gen-go. It is unable to classify generated messages +// that are too old or those that are generated by a different generator +// such as protoc-gen-gogo. +func isOpaque(t reflect.Type) bool { + // The current detection mechanism is to simply check the first field + // for a struct tag with the "protogen" key. + if t.Kind() == reflect.Struct && t.NumField() > 0 { + pgt := t.Field(0).Tag.Get("protogen") + return strings.HasPrefix(pgt, "opaque.") + } + return false +} + +func opaqueInitHook(mi *MessageInfo) bool { + mt := mi.GoReflectType.Elem() + si := opaqueStructInfo{ + structInfo: mi.makeStructInfo(mt), + } + + if !isOpaque(mt) { + return false + } + + defer atomic.StoreUint32(&mi.initDone, 1) + + mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} + fds := mi.Desc.Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + var fi fieldInfo + usePresence, _ := usePresenceForField(si, fd) + + switch { + case fd.IsWeak(): + // Weak fields are no different for opaque. + fi = fieldInfoForWeakMessage(fd, si.weakOffset) + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + // Oneofs are no different for opaque. + fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fd.IsMap(): + fi = mi.fieldInfoForMapOpaque(si, fd, fs) + case fd.IsList() && fd.Message() == nil && usePresence: + fi = mi.fieldInfoForScalarListOpaque(si, fd, fs) + case fd.IsList() && fd.Message() == nil: + // Proto3 lists without presence can use same access methods as open + fi = fieldInfoForList(fd, fs, mi.Exporter) + case fd.IsList() && usePresence: + fi = mi.fieldInfoForMessageListOpaque(si, fd, fs) + case fd.IsList(): + // Proto3 opaque messages that does not need presence bitmap. 
+ // Different representation than open struct, but same logic + fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs) + case fd.Message() != nil && usePresence: + fi = mi.fieldInfoForMessageOpaque(si, fd, fs) + case fd.Message() != nil: + // Proto3 messages without presence can use same access methods as open + fi = fieldInfoForMessage(fd, fs, mi.Exporter) + default: + fi = mi.fieldInfoForScalarOpaque(si, fd, fs) + } + mi.fields[fd.Number()] = &fi + } + mi.oneofs = map[protoreflect.Name]*oneofInfo{} + for i := 0; i < mi.Desc.Oneofs().Len(); i++ { + od := mi.Desc.Oneofs().Get(i) + if !od.IsSynthetic() { + mi.oneofs[od.Name()] = makeOneofInfo(od, si.structInfo, mi.Exporter) + } + } + + mi.denseFields = make([]*fieldInfo, fds.Len()*2) + for i := 0; i < fds.Len(); i++ { + if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { + mi.denseFields[fd.Number()] = mi.fields[fd.Number()] + } + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() { + mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) + i += od.Fields().Len() + } else { + mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) + i++ + } + } + + mi.makeExtensionFieldsFunc(mt, si.structInfo) + mi.makeUnknownFieldsFunc(mt, si.structInfo) + mi.makeOpaqueCoderMethods(mt, si) + mi.makeFieldTypes(si.structInfo) + + return true +} + +func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid type: got %v, want map kind", ft)) + } + fieldOffset := offsetOf(fs, mi.Exporter) + conv := NewConverter(ft, fd) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + // Don't bother checking presence bits, since we need to + // look at the map length even if the presence bit is set. 
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("invalid value: setting map field to read-only value")) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(pv) + }, + mutable: func(p pointer) protoreflect.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(reflect.PtrTo(ft), fd) + fieldOffset := offsetOf(fs, mi.Exporter) + index, _ := presenceIndex(mi.Desc, fd) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } + mi.setPresent(p, index) + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(pv.Elem()) + }, + mutable: func(p pointer) protoreflect.Value { + mi.setPresent(p, index) + return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type)) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs, mi.Exporter) + index, _ := presenceIndex(mi.Desc, fd) + fieldNumber := fd.Number() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + if !mi.present(p, index) { + return false + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + // Lazily unmarshal this field. 
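Editor's note: each fieldInfo built here supplies the has/clear/get/set/mutable closures that back the protoreflect.Message API. From the outside they are exercised roughly like this; structpb is used only as a convenient generated message for illustration and is not part of this change:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
        v := structpb.NewStringValue("hello")
        m := v.ProtoReflect()

        // Descriptor lookup plus the generic accessors that fieldInfo implements.
        fd := m.Descriptor().Fields().ByName("string_value")
        fmt.Println(m.Has(fd), m.Get(fd).String()) // true hello

        m.Clear(fd)
        fmt.Println(m.Has(fd)) // false
    }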
+ mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } + rv := sp.AsValueOf(fs.Type.Elem()) + return rv.Elem().Len() > 0 + }, + clear: func(p pointer) { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + rv := sp.AsValueOf(fs.Type.Elem()) + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + if !mi.present(p, index) { + return conv.Zero() + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } + rv := sp.AsValueOf(fs.Type.Elem()) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + rv := sp.AsValueOf(fs.Type.Elem()) + val := conv.GoValueOf(v) + if val.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } else { + rv.Elem().Set(val.Elem()) + } + }, + mutable: func(p pointer) protoreflect.Value { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + if mi.present(p, index) { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } else { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + } + rv := sp.AsValueOf(fs.Type.Elem()) + return conv.PBValueOf(rv) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs, mi.Exporter) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + return false + } + rv := sp.AsValueOf(fs.Type.Elem()) + return rv.Elem().Len() > 0 + }, + clear: func(p pointer) { + sp := p.Apply(fieldOffset).AtomicGetPointer() + if !sp.IsNil() { + rv := sp.AsValueOf(fs.Type.Elem()) + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + return conv.Zero() + } + rv := sp.AsValueOf(fs.Type.Elem()) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(reflect.New(fs.Type.Elem())) + } + val := conv.GoValueOf(v) + if val.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } else { + rv.Elem().Set(val.Elem()) + } + }, + mutable: func(p pointer) protoreflect.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(reflect.New(fs.Type.Elem())) + } + return conv.PBValueOf(rv) + }, + 
newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() { + nullable = true + } + deref := false + if nullable && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + deref = true + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs, mi.Exporter) + index, _ := presenceIndex(mi.Desc, fd) + var getter func(p pointer) protoreflect.Value + if !nullable { + getter = getterForDirectScalar(fd, fs, conv, fieldOffset) + } else { + getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset) + } + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + if nullable { + return mi.present(p, index) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + switch rv.Kind() { + case reflect.Bool: + return rv.Bool() + case reflect.Int32, reflect.Int64: + return rv.Int() != 0 + case reflect.Uint32, reflect.Uint64: + return rv.Uint() != 0 + case reflect.Float32, reflect.Float64: + return rv.Float() != 0 || math.Signbit(rv.Float()) + case reflect.String, reflect.Slice: + return rv.Len() > 0 + default: + panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen + } + }, + clear: func(p pointer) { + if nullable { + mi.clearPresent(p, index) + } + // This is only valuable for bytes and strings, but we do it unconditionally. + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: getter, + // TODO: Implement unsafe fast path for set? + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if deref { + if rv.IsNil() { + rv.Set(reflect.New(ft)) + } + rv = rv.Elem() + } + + rv.Set(conv.GoValueOf(v)) + if nullable && rv.Kind() == reflect.Slice && rv.IsNil() { + rv.Set(emptyBytes) + } + if nullable { + mi.setPresent(p, index) + } + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs, mi.Exporter) + index, _ := presenceIndex(mi.Desc, fd) + fieldNumber := fd.Number() + elemType := fs.Type.Elem() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + return mi.present(p, index) + }, + clear: func(p pointer) { + mi.clearPresent(p, index) + p.Apply(fieldOffset).AtomicSetNilPointer() + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + fp := p.Apply(fieldOffset) + mp := fp.AtomicGetPointer() + if mp.IsNil() { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + mp = fp.AtomicGetPointer() + } + rv := mp.AsValueOf(elemType) + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + val := pointerOfValue(conv.GoValueOf(v)) + if val.IsNil() { + panic("invalid nil pointer") + } + p.Apply(fieldOffset).AtomicSetPointer(val) + mi.setPresent(p, index) + }, + mutable: func(p pointer) protoreflect.Value { + fp := p.Apply(fieldOffset) + mp := fp.AtomicGetPointer() + if mp.IsNil() { + if mi.present(p, index) { + // Lazily unmarshal this field. 
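Editor's note: one subtlety in the zero-value presence test used by fieldInfoForScalarOpaque above: a float field holding negative zero still counts as set, which is why the check pairs a non-zero test with math.Signbit. A tiny illustration of just that predicate:

    package main

    import (
        "fmt"
        "math"
    )

    // hasFloat mirrors the zero-value presence test: a float field is treated
    // as set if it is non-zero or if it is negative zero, which Signbit can
    // distinguish from positive zero.
    func hasFloat(v float64) bool {
        return v != 0 || math.Signbit(v)
    }

    func main() {
        fmt.Println(hasFloat(0))                    // false
        fmt.Println(hasFloat(3.5))                  // true
        fmt.Println(hasFloat(math.Copysign(0, -1))) // true: negative zero
    }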
+ mi.lazyUnmarshal(p, fieldNumber) + mp = fp.AtomicGetPointer() + } else { + mp = pointerOfValue(conv.GoValueOf(conv.New())) + fp.AtomicSetPointer(mp) + mi.setPresent(p, index) + } + } + return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem())) + }, + newMessage: func() protoreflect.Message { + return conv.New().Message() + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +// A presenceList wraps a List, updating presence bits as necessary when the +// list contents change. +type presenceList struct { + pvalueList + setPresence func(bool) +} +type pvalueList interface { + protoreflect.List + //Unwrapper +} + +func (list presenceList) Append(v protoreflect.Value) { + list.pvalueList.Append(v) + list.setPresence(true) +} +func (list presenceList) Truncate(i int) { + list.pvalueList.Truncate(i) + list.setPresence(i > 0) +} + +// presenceIndex returns the index to pass to presence functions. +// +// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields. +func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) { + found := false + var index, numIndices uint32 + for i := 0; i < md.Fields().Len(); i++ { + f := md.Fields().Get(i) + if f == fd { + found = true + index = numIndices + } + if f.ContainingOneof() == nil || isLastOneofField(f) { + numIndices++ + } + } + if !found { + panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName())) + } + return index, presenceSize(numIndices) +} + +func isLastOneofField(fd protoreflect.FieldDescriptor) bool { + fields := fd.ContainingOneof().Fields() + return fields.Get(fields.Len()-1) == fd +} + +func (mi *MessageInfo) setPresent(p pointer, index uint32) { + p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize) +} + +func (mi *MessageInfo) clearPresent(p pointer, index uint32) { + p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index) +} + +func (mi *MessageInfo) present(p pointer, index uint32) bool { + return p.Apply(mi.presenceOffset).PresenceInfo().Present(index) +} + +// usePresenceForField implements the somewhat intricate logic of when +// the presence bitmap is used for a field. The main logic is that a +// field that is optional or that can be lazy will use the presence +// bit, but for proto2, also maps have a presence bit. It also records +// if the field can ever be lazy, which is true if we have a +// lazyOffset and the field is a message or a slice of messages. A +// field that is lazy will always need a presence bit. Oneofs are not +// lazy and do not use presence, unless they are a synthetic oneof, +// which is a proto3 optional field. For proto3 optionals, we use the +// presence and they can also be lazy when applicable (a message). +func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { + hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy() + + // Non-oneof scalar fields with explicit field presence use the presence array. 
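Editor's note: presenceIndex above assigns each field (or oneof group) one bit, and the setPresent/clearPresent/present helpers address that bit inside an array of uint32 words. A minimal, non-atomic sketch of the same bit layout; the real code uses atomics and unsafe pointer arithmetic:

    package main

    import "fmt"

    // bitmap is a simplified presence set: one bit per presence index,
    // packed into uint32 words.
    type bitmap []uint32

    func newBitmap(size uint32) bitmap { return make(bitmap, (size+31)/32) }

    func (b bitmap) set(i uint32)      { b[i/32] |= 1 << (i % 32) }
    func (b bitmap) clear(i uint32)    { b[i/32] &^= 1 << (i % 32) }
    func (b bitmap) has(i uint32) bool { return b[i/32]&(1<<(i%32)) != 0 }

    func main() {
        b := newBitmap(76)
        b.set(0)
        b.set(75)
        fmt.Println(b.has(0), b.has(1), b.has(75)) // true false true
        b.clear(75)
        fmt.Println(b.has(75)) // false
    }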
+ usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic()) + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + return false, false + case fd.IsWeak(): + return false, false + case fd.IsMap(): + return false, false + case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: + return hasLazyField, hasLazyField + default: + return usesPresenceArray || (hasLazyField && fd.HasPresence()), false + } +} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/src/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go new file mode 100644 index 000000000..a69825699 --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go @@ -0,0 +1,132 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if fd.Kind() == protoreflect.EnumKind { + // Enums for nullable opaque types. + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bool() + return protoreflect.ValueOfBool(*x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32() + return protoreflect.ValueOfInt32(*x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32() + return protoreflect.ValueOfUint32(*x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64() + return protoreflect.ValueOfInt64(*x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64() + return protoreflect.ValueOfUint64(*x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32() + return protoreflect.ValueOfFloat32(*x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64() + return protoreflect.ValueOfFloat64(*x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + if len(**x) == 0 { + return 
protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(**x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfString(**x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/src/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index ecb4623d7..31c19b54f 100644 --- a/src/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/src/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -205,6 +205,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { case fd.IsList(): if fd.Enum() != nil || fd.Message() != nil { ft = fs.Type.Elem() + + if ft.Kind() == reflect.Slice { + ft = ft.Elem() + } + } isMessage = fd.Message() != nil case fd.Enum() != nil: diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/src/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 986322b19..a74064620 100644 --- a/src/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/src/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -256,6 +256,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, ft := fs.Type nullable := fd.HasPresence() isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 + var getter func(p pointer) protoreflect.Value if nullable { if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { // This never occurs for generated message types. @@ -268,19 +269,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } conv := NewConverter(ft, fd) - - // TODO: Implement unsafe fast path? 
fieldOffset := offsetOf(fs, x) + + // Generate specialized getter functions to avoid going through reflect.Value + if nullable { + getter = getterForNullableScalar(fd, fs, conv, fieldOffset) + } else { + getter = getterForDirectScalar(fd, fs, conv, fieldOffset) + } + return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { if p.IsNil() { return false } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable { - return !rv.IsNil() + return !p.Apply(fieldOffset).Elem().IsNil() } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() switch rv.Kind() { case reflect.Bool: return rv.Bool() @@ -300,21 +307,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) protoreflect.Value { - if p.IsNil() { - return conv.Zero() - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if nullable { - if rv.IsNil() { - return conv.Zero() - } - if rv.Kind() == reflect.Ptr { - rv = rv.Elem() - } - } - return conv.PBValueOf(rv) - }, + get: getter, + // TODO: Implement unsafe fast path for set? set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable && rv.Kind() == reflect.Ptr { diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/src/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go new file mode 100644 index 000000000..af5e063a1 --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go @@ -0,0 +1,273 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if fd.Kind() == protoreflect.EnumKind { + elemType := fs.Type.Elem() + // Enums for nullable types. 
+ return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType) + if rv.IsNil() { + return conv.Zero() + } + return conv.PBValueOf(rv.Elem()) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).BoolPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfBool(**x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfInt32(**x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfUint32(**x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfInt64(**x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfUint64(**x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfFloat32(**x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfFloat64(**x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + if len(**x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(**x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfString(**x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + if len(*x) == 0 { + return conv.Zero() + } + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} + +func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if fd.Kind() == protoreflect.EnumKind { + // Enums for non nullable types. 
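Editor's note: these generated getters exist so the hot path closes over the field offset once and never builds a reflect.Value per call, with separate shapes for nullable (pointer-backed) and direct (zero-value) scalars. A rough sketch of the same idea on an ordinary Go struct; record and makeGetters are made-up names, not the vendored API:

    package main

    import "fmt"

    // record stands in for a generated message.
    type record struct {
        name *string // nullable scalar: nil means unset
        id   int64   // direct scalar: the zero value means unset
    }

    // makeGetters builds the accessors once, so each call is a plain pointer
    // dereference, mirroring the nullable/direct getter split above.
    func makeGetters() (getName func(*record) (string, bool), getID func(*record) (int64, bool)) {
        getName = func(r *record) (string, bool) {
            if r == nil || r.name == nil {
                return "", false
            }
            return *r.name, true
        }
        getID = func(r *record) (int64, bool) {
            if r == nil {
                return 0, false
            }
            return r.id, r.id != 0
        }
        return getName, getID
    }

    func main() {
        n := "agent"
        getName, getID := makeGetters()
        fmt.Println(getName(&record{name: &n})) // agent true
        fmt.Println(getID(&record{id: 7}))      // 7 true
        fmt.Println(getID(&record{}))           // 0 false
    }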
+ return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bool() + return protoreflect.ValueOfBool(*x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32() + return protoreflect.ValueOfInt32(*x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32() + return protoreflect.ValueOfUint32(*x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64() + return protoreflect.ValueOfInt64(*x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64() + return protoreflect.ValueOfUint64(*x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32() + return protoreflect.ValueOfFloat32(*x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64() + return protoreflect.ValueOfFloat64(*x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).String() + if len(*x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).String() + return protoreflect.ValueOfString(*x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/src/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 79e186667..041ebde2d 100644 --- a/src/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/src/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -8,6 +8,8 @@ import ( "reflect" "sync/atomic" "unsafe" + + "google.golang.org/protobuf/internal/protolazy" ) const UnsafeEnabled = true @@ -111,6 +113,13 @@ func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } +func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo { + return (**protolazy.XXX_lazyUnmarshalInfo)(p.p) +} + +func (p pointer) PresenceInfo() presence { + return presence{P: p.p} +} func (p pointer) Elem() pointer { return pointer{p: 
*(*unsafe.Pointer)(p.p)} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/src/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go new file mode 100644 index 000000000..38aa7b7dc --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go @@ -0,0 +1,42 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync/atomic" + "unsafe" +) + +func (p pointer) AtomicGetPointer() pointer { + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +func (p pointer) AtomicSetPointer(v pointer) { + atomic.StorePointer((*unsafe.Pointer)(p.p), v.p) +} + +func (p pointer) AtomicSetNilPointer() { + atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil)) +} + +func (p pointer) AtomicSetPointerIfNil(v pointer) pointer { + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) { + return v + } + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +type atomicV1MessageInfo struct{ p Pointer } + +func (mi *atomicV1MessageInfo) Get() Pointer { + return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p))) +} + +func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer { + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) { + return p + } + return mi.Get() +} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/presence.go b/src/vendor/google.golang.org/protobuf/internal/impl/presence.go new file mode 100644 index 000000000..914cb1ded --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/impl/presence.go @@ -0,0 +1,142 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync/atomic" + "unsafe" +) + +// presenceSize represents the size of a presence set, which should be the largest index of the set+1 +type presenceSize uint32 + +// presence is the internal representation of the bitmap array in a generated protobuf +type presence struct { + // This is a pointer to the beginning of an array of uint32 + P unsafe.Pointer +} + +func (p presence) toElem(num uint32) (ret *uint32) { + const ( + bitsPerByte = 8 + siz = unsafe.Sizeof(*ret) + ) + // p.P points to an array of uint32, num is the bit in this array that the + // caller wants to check/manipulate. Calculate the index in the array that + // contains this specific bit. E.g.: 76 / 32 = 2 (integer division). + offset := uintptr(num) / (siz * bitsPerByte) * siz + return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset)) +} + +// Present checks for the presence of a specific field number in a presence set. +func (p presence) Present(num uint32) bool { + if p.P == nil { + return false + } + return Export{}.Present(p.toElem(num), num) +} + +// SetPresent adds presence for a specific field number in a presence set. +func (p presence) SetPresent(num uint32, size presenceSize) { + Export{}.SetPresent(p.toElem(num), num, uint32(size)) +} + +// SetPresentUnatomic adds presence for a specific field number in a presence set without using +// atomic operations. Only to be called during unmarshaling. 
+func (p presence) SetPresentUnatomic(num uint32, size presenceSize) { + Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size)) +} + +// ClearPresent removes presence for a specific field number in a presence set. +func (p presence) ClearPresent(num uint32) { + Export{}.ClearPresent(p.toElem(num), num) +} + +// LoadPresenceCache (together with PresentInCache) allows for a +// cached version of checking for presence without re-reading the word +// for every field. It is optimized for efficiency and assumes no +// simltaneous mutation of the presence set (or at least does not have +// a problem with simultaneous mutation giving inconsistent results). +func (p presence) LoadPresenceCache() (current uint32) { + if p.P == nil { + return 0 + } + return atomic.LoadUint32((*uint32)(p.P)) +} + +// PresentInCache reads presence from a cached word in the presence +// bitmap. It caches up a new word if the bit is outside the +// word. This is for really fast iteration through bitmaps in cases +// where we either know that the bitmap will not be altered, or we +// don't care about inconsistencies caused by simultaneous writes. +func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool { + if num/32 != *cachedElement { + o := uintptr(num/32) * unsafe.Sizeof(uint32(0)) + q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o)) + *current = atomic.LoadUint32(q) + *cachedElement = num / 32 + } + return (*current & (1 << (num % 32))) > 0 +} + +// AnyPresent checks if any field is marked as present in the bitmap. +func (p presence) AnyPresent(size presenceSize) bool { + n := uintptr((size + 31) / 32) + for j := uintptr(0); j < n; j++ { + o := j * unsafe.Sizeof(uint32(0)) + q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o)) + b := atomic.LoadUint32(q) + if b > 0 { + return true + } + } + return false +} + +// toRaceDetectData finds the preceding RaceDetectHookData in a +// message by using pointer arithmetic. As the type of the presence +// set (bitmap) varies with the number of fields in the protobuf, we +// can not have a struct type containing the array and the +// RaceDetectHookData. instead the RaceDetectHookData is placed +// immediately before the bitmap array, and we find it by walking +// backwards in the struct. +// +// This method is only called from the race-detect version of the code, +// so RaceDetectHookData is never an empty struct. +func (p presence) toRaceDetectData() *RaceDetectHookData { + var template struct { + d RaceDetectHookData + a [1]uint32 + } + o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o)) +} + +func atomicLoadShadowPresence(p **[]byte) *[]byte { + return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreShadowPresence(p **[]byte, v *[]byte) { + atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v)) +} + +// findPointerToRaceDetectData finds the preceding RaceDetectHookData +// in a message by using pointer arithmetic. For the methods called +// directy from generated code, we don't have a pointer to the +// beginning of the presence set, but a pointer inside the array. As +// we know the index of the bit we're manipulating (num), we can +// calculate which element of the array ptr is pointing to. With that +// information we find the preceding RaceDetectHookData and can +// manipulate the shadow bitmap. 
+// +// This method is only called from the race-detect version of the +// code, so RaceDetectHookData is never an empty struct. +func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData { + var template struct { + d RaceDetectHookData + a [1]uint32 + } + o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0)) + return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o)) +} diff --git a/src/vendor/google.golang.org/protobuf/internal/impl/validate.go b/src/vendor/google.golang.org/protobuf/internal/impl/validate.go index a24e6bbd7..b534a3d6d 100644 --- a/src/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/src/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -37,6 +37,10 @@ const ( // ValidationValid indicates that unmarshaling the message will succeed. ValidationValid + + // ValidationWrongWireType indicates that a validated field does not have + // the expected wire type. + ValidationWrongWireType ) func (v ValidationStatus) String() string { @@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage + + if ft.Kind() == reflect.Ptr { + // Repeated opaque message fields are *[]*T. + ft = ft.Elem() + } + if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } case protoreflect.GroupKind: vi.typ = validationTypeGroup + + if ft.Kind() == reflect.Ptr { + // Repeated opaque message fields are *[]*T. + ft = ft.Elem() + } + if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } diff --git a/src/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/src/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go new file mode 100644 index 000000000..82e5cab4a --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go @@ -0,0 +1,364 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Helper code for parsing a protocol buffer + +package protolazy + +import ( + "errors" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" +) + +// BufferReader is a structure encapsulating a protobuf and a current position +type BufferReader struct { + Buf []byte + Pos int +} + +// NewBufferReader creates a new BufferRead from a protobuf +func NewBufferReader(buf []byte) BufferReader { + return BufferReader{Buf: buf, Pos: 0} +} + +var errOutOfBounds = errors.New("protobuf decoding: out of bounds") +var errOverflow = errors.New("proto: integer overflow") + +func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) { + i := b.Pos + l := len(b.Buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + v := b.Buf[i] + i++ + x |= (uint64(v) & 0x7F) << shift + if v < 0x80 { + b.Pos = i + return + } + } + + // The number is too large to represent in a 64-bit value. 
+ err = errOverflow + return +} + +// decodeVarint decodes a varint at the current position +func (b *BufferReader) DecodeVarint() (x uint64, err error) { + i := b.Pos + buf := b.Buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + b.Pos++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return b.DecodeVarintSlow() + } + + var v uint64 + // we already checked the first byte + x = uint64(buf[i]) & 127 + i++ + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 28 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 35 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 42 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 49 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 56 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 63 + if v < 128 { + goto done + } + + return 0, errOverflow + +done: + b.Pos = i + return +} + +// decodeVarint32 decodes a varint32 at the current position +func (b *BufferReader) DecodeVarint32() (x uint32, err error) { + i := b.Pos + buf := b.Buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + b.Pos++ + return uint32(buf[i]), nil + } else if len(buf)-i < 5 { + v, err := b.DecodeVarintSlow() + return uint32(v), err + } + + var v uint32 + // we already checked the first byte + x = uint32(buf[i]) & 127 + i++ + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 28 + if v < 128 { + goto done + } + + return 0, errOverflow + +done: + b.Pos = i + return +} + +// skipValue skips a value in the protobuf, based on the specified tag +func (b *BufferReader) SkipValue(tag uint32) (err error) { + wireType := tag & 0x7 + switch protowire.Type(wireType) { + case protowire.VarintType: + err = b.SkipVarint() + case protowire.Fixed64Type: + err = b.SkipFixed64() + case protowire.BytesType: + var n uint32 + n, err = b.DecodeVarint32() + if err == nil { + err = b.Skip(int(n)) + } + case protowire.StartGroupType: + err = b.SkipGroup(tag) + case protowire.Fixed32Type: + err = b.SkipFixed32() + default: + err = fmt.Errorf("Unexpected wire type (%d)", wireType) + } + return +} + +// skipGroup skips a group with the specified tag. 
It executes efficiently using a tag stack +func (b *BufferReader) SkipGroup(tag uint32) (err error) { + tagStack := make([]uint32, 0, 16) + tagStack = append(tagStack, tag) + var n uint32 + for len(tagStack) > 0 { + tag, err = b.DecodeVarint32() + if err != nil { + return err + } + switch protowire.Type(tag & 0x7) { + case protowire.VarintType: + err = b.SkipVarint() + case protowire.Fixed64Type: + err = b.Skip(8) + case protowire.BytesType: + n, err = b.DecodeVarint32() + if err == nil { + err = b.Skip(int(n)) + } + case protowire.StartGroupType: + tagStack = append(tagStack, tag) + case protowire.Fixed32Type: + err = b.SkipFixed32() + case protowire.EndGroupType: + if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) { + tagStack = tagStack[:len(tagStack)-1] + } else { + err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d", + protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos) + } + } + if err != nil { + return err + } + } + return nil +} + +// skipVarint effiently skips a varint +func (b *BufferReader) SkipVarint() (err error) { + i := b.Pos + + if len(b.Buf)-i < 10 { + // Use DecodeVarintSlow() to check for buffer overflow, but ignore result + if _, err := b.DecodeVarintSlow(); err != nil { + return err + } + return nil + } + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + return errOverflow + +out: + b.Pos = i + 1 + return nil +} + +// skip skips the specified number of bytes +func (b *BufferReader) Skip(n int) (err error) { + if len(b.Buf) < b.Pos+n { + return io.ErrUnexpectedEOF + } + b.Pos += n + return +} + +// skipFixed64 skips a fixed64 +func (b *BufferReader) SkipFixed64() (err error) { + return b.Skip(8) +} + +// skipFixed32 skips a fixed32 +func (b *BufferReader) SkipFixed32() (err error) { + return b.Skip(4) +} + +// skipBytes skips a set of bytes +func (b *BufferReader) SkipBytes() (err error) { + n, err := b.DecodeVarint32() + if err != nil { + return err + } + return b.Skip(int(n)) +} + +// Done returns whether we are at the end of the protobuf +func (b *BufferReader) Done() bool { + return b.Pos == len(b.Buf) +} + +// Remaining returns how many bytes remain +func (b *BufferReader) Remaining() int { + return len(b.Buf) - b.Pos +} diff --git a/src/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/src/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go new file mode 100644 index 000000000..ff4d4834b --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go @@ -0,0 +1,359 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protolazy contains internal data structures for lazy message decoding. 
+package protolazy + +import ( + "fmt" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// IndexEntry is the structure for an index of the fields in a message of a +// proto (not descending to sub-messages) +type IndexEntry struct { + FieldNum uint32 + // first byte of this tag/field + Start uint32 + // first byte after a contiguous sequence of bytes for this tag/field, which could + // include a single encoding of the field, or multiple encodings for the field + End uint32 + // True if this protobuf segment includes multiple encodings of the field + MultipleContiguous bool +} + +// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message +// +// Deprecated: Do not use. This will be deleted in the near future. +type XXX_lazyUnmarshalInfo struct { + // Index of fields and their positions in the protobuf for this + // message. Make index be a pointer to a slice so it can be updated + // atomically. The index pointer is only set once (lazily when/if + // the index is first needed), and must always be SET and LOADED + // ATOMICALLY. + index *[]IndexEntry + // The protobuf associated with this lazily decoded message. It is + // only set during proto.Unmarshal(). It doesn't need to be set and + // loaded atomically, since any simultaneous set (Unmarshal) and read + // (during a get) would already be a race in the app code. + Protobuf []byte + // The flags present when Unmarshal was originally called for this particular message + unmarshalFlags piface.UnmarshalInputFlags +} + +// The Buffer and SetBuffer methods let v2/internal/impl interact with +// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle. + +// Buffer returns the lazy unmarshal buffer. +// +// Deprecated: Do not use. This will be deleted in the near future. +func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte { + return lazy.Protobuf +} + +// SetBuffer sets the lazy unmarshal buffer. +// +// Deprecated: Do not use. This will be deleted in the near future. +func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) { + lazy.Protobuf = b +} + +// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags. +// The flags should reflect how Unmarshal was called. +func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) { + lazy.unmarshalFlags = f +} + +// UnmarshalFlags returns the original unmarshalInputFlags. +func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags { + return lazy.unmarshalFlags +} + +// AllowedPartial returns true if the user originally unmarshalled this message with +// AllowPartial set to true +func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool { + return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0 +} + +func protoFieldNumber(tag uint32) uint32 { + return tag >> 3 +} + +// buildIndex builds an index of the specified protobuf, return the index +// array and an error. 
+func buildIndex(buf []byte) ([]IndexEntry, error) { + index := make([]IndexEntry, 0, 16) + var lastProtoFieldNum uint32 + var outOfOrder bool + + var r BufferReader = NewBufferReader(buf) + + for !r.Done() { + var tag uint32 + var err error + var curPos = r.Pos + // INLINED: tag, err = r.DecodeVarint32() + { + i := r.Pos + buf := r.Buf + + if i >= len(buf) { + return nil, errOutOfBounds + } else if buf[i] < 0x80 { + r.Pos++ + tag = uint32(buf[i]) + } else if r.Remaining() < 5 { + var v uint64 + v, err = r.DecodeVarintSlow() + tag = uint32(v) + } else { + var v uint32 + // we already checked the first byte + tag = uint32(buf[i]) & 127 + i++ + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 28 + if v < 128 { + goto done + } + + return nil, errOutOfBounds + + done: + r.Pos = i + } + } + // DONE: tag, err = r.DecodeVarint32() + + fieldNum := protoFieldNumber(tag) + if fieldNum < lastProtoFieldNum { + outOfOrder = true + } + + // Skip the current value -- will skip over an entire group as well. + // INLINED: err = r.SkipValue(tag) + wireType := tag & 0x7 + switch protowire.Type(wireType) { + case protowire.VarintType: + // INLINED: err = r.SkipVarint() + i := r.Pos + + if len(r.Buf)-i < 10 { + // Use DecodeVarintSlow() to skip while + // checking for buffer overflow, but ignore result + _, err = r.DecodeVarintSlow() + goto out2 + } + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + return nil, errOverflow + out: + r.Pos = i + 1 + // DONE: err = r.SkipVarint() + case protowire.Fixed64Type: + err = r.SkipFixed64() + case protowire.BytesType: + var n uint32 + n, err = r.DecodeVarint32() + if err == nil { + err = r.Skip(int(n)) + } + case protowire.StartGroupType: + err = r.SkipGroup(tag) + case protowire.Fixed32Type: + err = r.SkipFixed32() + default: + err = fmt.Errorf("Unexpected wire type (%d)", wireType) + } + // DONE: err = r.SkipValue(tag) + + out2: + if err != nil { + return nil, err + } + if fieldNum != lastProtoFieldNum { + index = append(index, IndexEntry{FieldNum: fieldNum, + Start: uint32(curPos), + End: uint32(r.Pos)}, + ) + } else { + index[len(index)-1].End = uint32(r.Pos) + index[len(index)-1].MultipleContiguous = true + } + lastProtoFieldNum = fieldNum + } + if outOfOrder { + sort.Slice(index, func(i, j int) bool { + return index[i].FieldNum < index[j].FieldNum || + (index[i].FieldNum == index[j].FieldNum && + index[i].Start < index[j].Start) + }) + } + return index, nil +} + +func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) { + start, end, found, _, multipleEntries := lazy.FindFieldInProto(num) + if multipleEntries != nil { + for _, entry := range multipleEntries { + size += int(entry.End - entry.Start) + } + return size + } + if !found { + return 0 + } + return int(end - start) +} + +func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) { + start, end, found, _, multipleEntries := 
lazy.FindFieldInProto(num) + if multipleEntries != nil { + for _, entry := range multipleEntries { + b = append(b, lazy.Protobuf[entry.Start:entry.End]...) + } + return b, true + } + if !found { + return nil, false + } + b = append(b, lazy.Protobuf[start:end]...) + return b, true +} + +func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) { + atomicStoreIndex(&lazy.index, &index) +} + +// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information +// (including protobuf), returns startOffset/endOffset/found. +func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) { + if lazy.Protobuf == nil { + // There is no backing protobuf for this message -- it was made from a builder + return 0, 0, false, false, nil + } + index := atomicLoadIndex(&lazy.index) + if index == nil { + r, err := buildIndex(lazy.Protobuf) + if err != nil { + panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err)) + } + // lazy.index is a pointer to the slice returned by BuildIndex + index = &r + atomicStoreIndex(&lazy.index, index) + } + return lookupField(index, fieldNum) +} + +// lookupField returns the offset at which the indicated field starts using +// the index, offset immediately after field ends (including all instances of +// a repeated field), and bools indicating if field was found and if there +// are multiple encodings of the field in the byte range. +// +// To hande the uncommon case where there are repeated encodings for the same +// field which are not consecutive in the protobuf (so we need to returns +// multiple start/end offsets), we also return a slice multipleEntries. If +// multipleEntries is non-nil, then multiple entries were found, and the +// values in the slice should be used, rather than start/end/found. +func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) { + // The pointer indexp to the index was already loaded atomically. + // The slice is uniquely associated with the pointer, so it doesn't + // need to be loaded atomically. + index := *indexp + for i, entry := range index { + if fieldNum == entry.FieldNum { + if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum { + // Handle the uncommon case where there are + // repeated entries for the same field which + // are not contiguous in the protobuf. + multiple := make([]IndexEntry, 1, 2) + multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous} + i++ + for i < len(index) && index[i].FieldNum == fieldNum { + multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous}) + i++ + } + return 0, 0, false, false, multiple + + } + return entry.Start, entry.End, true, entry.MultipleContiguous, nil + } + if fieldNum < entry.FieldNum { + return 0, 0, false, false, nil + } + } + return 0, 0, false, false, nil +} diff --git a/src/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/src/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go new file mode 100644 index 000000000..dc2a64ca6 --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
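[Editor's note — illustrative example, not part of the patch.] buildIndex and lookupField above belong to the internal protolazy package and cannot be imported by application code. As a rough sketch of the same idea — recording where each top-level field's bytes start and end without decoding the values — the following relies only on the public protowire package; fieldSpan and scanFields are names invented for this example, not APIs introduced by this diff.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// fieldSpan is a simplified stand-in for protolazy.IndexEntry: the byte range
// one top-level field record occupies in a wire-format buffer.
type fieldSpan struct {
	Num        protowire.Number
	Start, End int
}

// scanFields walks the top-level field records of a wire-format message and
// records their offsets, skipping over each value without decoding it.
func scanFields(buf []byte) ([]fieldSpan, error) {
	var spans []fieldSpan
	for pos := 0; pos < len(buf); {
		num, _, n := protowire.ConsumeField(buf[pos:])
		if n < 0 {
			return nil, protowire.ParseError(n)
		}
		spans = append(spans, fieldSpan{Num: num, Start: pos, End: pos + n})
		pos += n
	}
	return spans, nil
}

func main() {
	// Hand-built message: field 1 = varint 150, field 2 = bytes "hi".
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 150)
	b = protowire.AppendTag(b, 2, protowire.BytesType)
	b = protowire.AppendBytes(b, []byte("hi"))

	spans, err := scanFields(b)
	fmt.Println(spans, err) // [{1 0 3} {2 3 7}] <nil>
}

The real index additionally merges contiguous runs of the same field number (MultipleContiguous) and sorts entries when fields arrive out of order, which this sketch does not attempt.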
+ +package protolazy + +import ( + "sync/atomic" + "unsafe" +) + +func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry { + return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/src/vendor/google.golang.org/protobuf/internal/version/version.go b/src/vendor/google.golang.org/protobuf/internal/version/version.go index 62a52a40a..3018450df 100644 --- a/src/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/src/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 35 - Patch = 2 + Minor = 36 + Patch = 1 PreRelease = "" ) diff --git a/src/vendor/google.golang.org/protobuf/proto/decode.go b/src/vendor/google.golang.org/protobuf/proto/decode.go index d75a6534c..a3b5e142d 100644 --- a/src/vendor/google.golang.org/protobuf/proto/decode.go +++ b/src/vendor/google.golang.org/protobuf/proto/decode.go @@ -47,6 +47,12 @@ type UnmarshalOptions struct { // RecursionLimit limits how deeply messages may be nested. // If zero, a default limit is applied. RecursionLimit int + + // + // NoLazyDecoding turns off lazy decoding, which otherwise is enabled by + // default. Lazy decoding only affects submessages (annotated with [lazy = + // true] in the .proto file) within messages that use the Opaque API. + NoLazyDecoding bool } // Unmarshal parses the wire-format message in b and places the result in m. @@ -104,6 +110,16 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } + + if !allowPartial { + // This does not affect how current unmarshal functions work, it just allows them + // to record this for lazy the decoding case. + in.Flags |= protoiface.UnmarshalCheckRequired + } + if o.NoLazyDecoding { + in.Flags |= protoiface.UnmarshalNoLazyDecoding + } + out, err = methods.Unmarshal(in) } else { o.RecursionLimit-- diff --git a/src/vendor/google.golang.org/protobuf/proto/encode.go b/src/vendor/google.golang.org/protobuf/proto/encode.go index 1f847bcc3..f0473c586 100644 --- a/src/vendor/google.golang.org/protobuf/proto/encode.go +++ b/src/vendor/google.golang.org/protobuf/proto/encode.go @@ -63,7 +63,8 @@ type MarshalOptions struct { // options (except for UseCachedSize itself). // // 2. The message and all its submessages have not changed in any - // way since the Size call. + // way since the Size call. For lazily decoded messages, accessing + // a message results in decoding the message, which is a change. // // If either of these invariants is violated, // the results are undefined and may include panics or corrupted output. diff --git a/src/vendor/google.golang.org/protobuf/proto/size.go b/src/vendor/google.golang.org/protobuf/proto/size.go index 052fb5ae3..c8675806c 100644 --- a/src/vendor/google.golang.org/protobuf/proto/size.go +++ b/src/vendor/google.golang.org/protobuf/proto/size.go @@ -12,11 +12,19 @@ import ( ) // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. 
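[Editor's note — illustrative example, not part of the patch.] The proto/decode.go hunk above adds UnmarshalOptions.NoLazyDecoding, and the proto/size.go hunk documents that Size can report more bytes than Marshal writes for lazily decoded messages held in non-minimal wire form. A hedged sketch of where the new option is set once this bump to protobuf v1.36.1 is vendored: it uses the well-known Any type purely as a stand-in (lazy decoding only ever applies to Opaque API submessages annotated [lazy = true], so the option is a no-op here), and the type URL is a placeholder.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// Placeholder payload: an Any with a made-up type URL and no value.
	src := &anypb.Any{TypeUrl: "type.googleapis.com/example.Placeholder"}
	wire, err := proto.Marshal(src)
	if err != nil {
		panic(err)
	}

	// Opt out of lazy decoding for this call so any [lazy = true] submessages
	// would be unmarshaled eagerly (and malformed ones surfaced here).
	dst := &anypb.Any{}
	if err := proto.UnmarshalOptions{NoLazyDecoding: true}.Unmarshal(wire, dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.GetTypeUrl())

	// Per the size.go note above: for lazily decoded messages kept in
	// non-minimal wire form, proto.Size may exceed what proto.Marshal writes.
	fmt.Println(proto.Size(dst))
}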
func Size(m Message) int { return MarshalOptions{}.Size(m) } // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. func (o MarshalOptions) Size(m Message) int { // Treat a nil message interface as an empty message; nothing to output. if m == nil { diff --git a/src/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/src/vendor/google.golang.org/protobuf/proto/wrapperopaque.go new file mode 100644 index 000000000..267fd0f1f --- /dev/null +++ b/src/vendor/google.golang.org/protobuf/proto/wrapperopaque.go @@ -0,0 +1,80 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// ValueOrNil returns nil if has is false, or a pointer to a new variable +// containing the value returned by the specified getter. +// +// This function is similar to the wrappers (proto.Int32(), proto.String(), +// etc.), but is generic (works for any field type) and works with the hasser +// and getter of a field, as opposed to a value. +// +// This is convenient when populating builder fields. +// +// Example: +// +// hop := attr.GetDirectHop() +// injectedRoute := ripb.InjectedRoute_builder{ +// Prefixes: route.GetPrefixes(), +// NextHop: proto.ValueOrNil(hop.HasAddress(), hop.GetAddress), +// } +func ValueOrNil[T any](has bool, getter func() T) *T { + if !has { + return nil + } + v := getter() + return &v +} + +// ValueOrDefault returns the protobuf message val if val is not nil, otherwise +// it returns a pointer to an empty val message. +// +// This function allows for translating code from the old Open Struct API to the +// new Opaque API. +// +// The old Open Struct API represented oneof fields with a wrapper struct: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{ +// // The Avatar oneof will be set, with an empty SignedImage. +// Avatar: &accountpb.Profile_SignedImage{signedImg}, +// } +// +// The new Opaque API treats oneof fields like regular fields, there are no more +// wrapper structs: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{} +// profile.SetSignedImage(signedImg) +// +// For convenience, the Opaque API also offers Builders, which allow for a +// direct translation of struct initialization. However, because Builders use +// nilness to represent field presence (but there is no non-nil wrapper struct +// anymore), Builders cannot distinguish between an unset oneof and a set oneof +// with nil message. The above code would need to be translated with help of the +// ValueOrDefault function to retain the same behavior: +// +// var signedImg *accountpb.SignedImage +// return &accountpb.Profile_builder{ +// SignedImage: proto.ValueOrDefault(signedImg), +// }.Build() +func ValueOrDefault[T interface { + *P + Message +}, P any](val T) T { + if val == nil { + return T(new(P)) + } + return val +} + +// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of +// type []byte. 
+func ValueOrDefaultBytes(val []byte) []byte { + if val == nil { + return []byte{} + } + return val +} diff --git a/src/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/src/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index a7b0d06ff..a4b78acef 100644 --- a/src/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/src/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // google.golang.org/protobuf/runtime/protoiface.Methods. + // [google.golang.org/protobuf/runtime/protoiface.Methods]. // Consult the protoiface package documentation for details. ProtoMethods() *methods } diff --git a/src/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/src/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 246156561..28e9e9f03 100644 --- a/src/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/src/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -122,6 +122,22 @@ type UnmarshalInputFlags = uint8 const ( UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota + + // UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer. + // The unmarshaller must not modify the contents of the buffer. + UnmarshalAliasBuffer + + // UnmarshalValidated indicates that validation has already been + // performed on the input buffer. + UnmarshalValidated + + // UnmarshalCheckRequired is set if this unmarshal operation ultimately will care if required fields are + // initialized. + UnmarshalCheckRequired + + // UnmarshalNoLazyDecoding is set if this unmarshal operation should not use + // lazy decoding, even when otherwise available. + UnmarshalNoLazyDecoding ) // UnmarshalOutputFlags are output from the Unmarshal method. diff --git a/src/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/src/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go index 4a1ab7fb3..93df1b569 100644 --- a/src/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go +++ b/src/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -15,6 +15,7 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/filetype" "google.golang.org/protobuf/internal/impl" + "google.golang.org/protobuf/internal/protolazy" ) // UnsafeEnabled specifies whether package unsafe can be used. @@ -39,6 +40,9 @@ type ( ExtensionFieldV1 = impl.ExtensionField Pointer = impl.Pointer + + LazyUnmarshalInfo = *protolazy.XXX_lazyUnmarshalInfo + RaceDetectHookData = impl.RaceDetectHookData ) var X impl.Export diff --git a/src/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/src/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 87da199a3..191552cce 100644 --- a/src/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/src/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -210,10 +210,7 @@ import ( // "value": "1.212s" // } type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A URL/resource name that uniquely identifies the type of the serialized // protocol buffer message. This string must contain at least // one "/" character. The last segment of the URL's path must represent @@ -244,7 +241,9 @@ type Any struct { // used with implementation specific semantics. 
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New marshals src into a new Any instance. diff --git a/src/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/src/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index b99d4d241..34d76e6cd 100644 --- a/src/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/src/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -141,10 +141,7 @@ import ( // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". type Duration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years @@ -155,7 +152,9 @@ type Duration struct { // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New constructs a new Duration from the provided time.Duration. diff --git a/src/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/src/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index 19de8d371..e5d7da38c 100644 --- a/src/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/src/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -284,12 +284,11 @@ import ( // request should verify the included field paths, and return an // `INVALID_ARGUMENT` error if any path is unmappable. type FieldMask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The set of field mask paths. - Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New constructs a field mask from a list of paths and verifies that diff --git a/src/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/src/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index 8f206a661..f2c53ea33 100644 --- a/src/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/src/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -187,12 +187,11 @@ func (NullValue) EnumDescriptor() ([]byte, []int) { // // The JSON representation for `Struct` is JSON object. 
type Struct struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Unordered map of dynamically typed values. - Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewStruct constructs a Struct from a general-purpose Go map. @@ -276,13 +275,10 @@ func (x *Struct) GetFields() map[string]*Value { // // The JSON representation for `Value` is JSON value. type Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The kind of value. // - // Types that are assignable to Kind: + // Types that are valid to be assigned to Kind: // // *Value_NullValue // *Value_NumberValue @@ -290,7 +286,9 @@ type Value struct { // *Value_BoolValue // *Value_StructValue // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` + Kind isValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewValue constructs a Value from a general-purpose Go interface. @@ -483,51 +481,63 @@ func (*Value) Descriptor() ([]byte, []int) { return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} } -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind +func (x *Value) GetKind() isValue_Kind { + if x != nil { + return x.Kind } return nil } func (x *Value) GetNullValue() NullValue { - if x, ok := x.GetKind().(*Value_NullValue); ok { - return x.NullValue + if x != nil { + if x, ok := x.Kind.(*Value_NullValue); ok { + return x.NullValue + } } return NullValue_NULL_VALUE } func (x *Value) GetNumberValue() float64 { - if x, ok := x.GetKind().(*Value_NumberValue); ok { - return x.NumberValue + if x != nil { + if x, ok := x.Kind.(*Value_NumberValue); ok { + return x.NumberValue + } } return 0 } func (x *Value) GetStringValue() string { - if x, ok := x.GetKind().(*Value_StringValue); ok { - return x.StringValue + if x != nil { + if x, ok := x.Kind.(*Value_StringValue); ok { + return x.StringValue + } } return "" } func (x *Value) GetBoolValue() bool { - if x, ok := x.GetKind().(*Value_BoolValue); ok { - return x.BoolValue + if x != nil { + if x, ok := x.Kind.(*Value_BoolValue); ok { + return x.BoolValue + } } return false } func (x *Value) GetStructValue() *Struct { - if x, ok := x.GetKind().(*Value_StructValue); ok { - return x.StructValue + if x != nil { + if x, ok := x.Kind.(*Value_StructValue); ok { + return x.StructValue + } } return nil } func (x *Value) GetListValue() *ListValue { - if x, ok := x.GetKind().(*Value_ListValue); ok { - return x.ListValue + if x != nil { + if x, ok := x.Kind.(*Value_ListValue); ok { + return x.ListValue + } } return nil } @@ -582,12 +592,11 @@ func (*Value_ListValue) isValue_Kind() {} // // The JSON representation for `ListValue` is JSON array. type ListValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Repeated field of dynamically typed values. 
- Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewList constructs a ListValue from a general-purpose Go slice. diff --git a/src/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/src/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 0d20722d7..9550109aa 100644 --- a/src/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/src/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -170,10 +170,7 @@ import ( // http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() // ) to obtain a formatter capable of generating timestamps in this format. type Timestamp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. @@ -182,7 +179,9 @@ type Timestamp struct { // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Now constructs a new Timestamp from the current time. diff --git a/src/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/src/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 006060e56..15b424ec1 100644 --- a/src/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/src/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -54,12 +54,11 @@ import ( // // The JSON representation for `DoubleValue` is JSON number. type DoubleValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The double value. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Double stores v in a new DoubleValue and returns a pointer to it. @@ -108,12 +107,11 @@ func (x *DoubleValue) GetValue() float64 { // // The JSON representation for `FloatValue` is JSON number. type FloatValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The float value. - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Float stores v in a new FloatValue and returns a pointer to it. @@ -162,12 +160,11 @@ func (x *FloatValue) GetValue() float32 { // // The JSON representation for `Int64Value` is JSON string. 
type Int64Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The int64 value. - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Int64 stores v in a new Int64Value and returns a pointer to it. @@ -216,12 +213,11 @@ func (x *Int64Value) GetValue() int64 { // // The JSON representation for `UInt64Value` is JSON string. type UInt64Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The uint64 value. - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // UInt64 stores v in a new UInt64Value and returns a pointer to it. @@ -270,12 +266,11 @@ func (x *UInt64Value) GetValue() uint64 { // // The JSON representation for `Int32Value` is JSON number. type Int32Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The int32 value. - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Int32 stores v in a new Int32Value and returns a pointer to it. @@ -324,12 +319,11 @@ func (x *Int32Value) GetValue() int32 { // // The JSON representation for `UInt32Value` is JSON number. type UInt32Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The uint32 value. - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // UInt32 stores v in a new UInt32Value and returns a pointer to it. @@ -378,12 +372,11 @@ func (x *UInt32Value) GetValue() uint32 { // // The JSON representation for `BoolValue` is JSON `true` and `false`. type BoolValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The bool value. - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Bool stores v in a new BoolValue and returns a pointer to it. @@ -432,12 +425,11 @@ func (x *BoolValue) GetValue() bool { // // The JSON representation for `StringValue` is JSON string. type StringValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The string value. 
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // String stores v in a new StringValue and returns a pointer to it. @@ -486,12 +478,11 @@ func (x *StringValue) GetValue() string { // // The JSON representation for `BytesValue` is JSON string. type BytesValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The bytes value. - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Bytes stores v in a new BytesValue and returns a pointer to it. diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index 14de31f65..e3bab3a80 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -1,7 +1,7 @@ -# code.cloudfoundry.org/go-batching v0.0.0-20241125161418-1c4c43ed1a1e +# code.cloudfoundry.org/go-batching v0.0.0-20241223135445-b8f45a03eb70 ## explicit; go 1.22.0 code.cloudfoundry.org/go-batching -# code.cloudfoundry.org/go-diodes v0.0.0-20241125060457-612558937770 +# code.cloudfoundry.org/go-diodes v0.0.0-20241223074059-7f8c1f03edeb ## explicit; go 1.22.0 code.cloudfoundry.org/go-diodes # code.cloudfoundry.org/go-envstruct v1.7.0 @@ -13,11 +13,11 @@ code.cloudfoundry.org/go-loggregator/v9 code.cloudfoundry.org/go-loggregator/v9/conversion code.cloudfoundry.org/go-loggregator/v9/rfc5424 code.cloudfoundry.org/go-loggregator/v9/rpc/loggregator_v2 -# code.cloudfoundry.org/go-metric-registry v0.0.0-20241125210533-4270b99b04f2 +# code.cloudfoundry.org/go-metric-registry v0.0.0-20241216212740-f99815f94a2a ## explicit; go 1.22.0 code.cloudfoundry.org/go-metric-registry code.cloudfoundry.org/go-metric-registry/testhelpers -# code.cloudfoundry.org/tlsconfig v0.10.0 +# code.cloudfoundry.org/tlsconfig v0.13.0 ## explicit; go 1.22 code.cloudfoundry.org/tlsconfig code.cloudfoundry.org/tlsconfig/certtest @@ -44,10 +44,10 @@ github.com/cespare/xxhash/v2 # github.com/cloudfoundry/dropsonde v1.1.0 ## explicit; go 1.18 github.com/cloudfoundry/dropsonde/emitter -# github.com/cloudfoundry/sonde-go v0.0.0-20241016180203-3c0e1c24e908 +# github.com/cloudfoundry/sonde-go v0.0.0-20241223073712-8666f251e654 ## explicit; go 1.18 github.com/cloudfoundry/sonde-go/events -# github.com/go-chi/chi/v5 v5.1.0 +# github.com/go-chi/chi/v5 v5.2.0 ## explicit; go 1.14 github.com/go-chi/chi/v5 # github.com/go-logr/logr v1.4.2 @@ -65,11 +65,11 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/pprof v0.0.0-20241128161848-dc51965c6481 +# github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad ## explicit; go 1.22 github.com/google/pprof/profile -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 -## explicit; go 1.21 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 +## explicit; go 1.22.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -91,7 +91,7 @@ github.com/klauspost/compress/zstd/internal/xxhash # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## 
explicit github.com/munnerz/goautoneg -# github.com/onsi/ginkgo/v2 v2.22.0 +# github.com/onsi/ginkgo/v2 v2.22.1 ## explicit; go 1.22.0 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -113,7 +113,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.36.0 +# github.com/onsi/gomega v1.36.1 ## explicit; go 1.22 github.com/onsi/gomega github.com/onsi/gomega/format @@ -142,7 +142,7 @@ github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.60.1 +# github.com/prometheus/common v0.61.0 ## explicit; go 1.21 github.com/prometheus/common/expfmt github.com/prometheus/common/model @@ -163,7 +163,7 @@ github.com/square/certstrap/pkix # github.com/valyala/bytebufferpool v1.0.0 ## explicit github.com/valyala/bytebufferpool -# github.com/valyala/fasthttp v1.57.0 +# github.com/valyala/fasthttp v1.58.0 ## explicit; go 1.21 github.com/valyala/fasthttp github.com/valyala/fasthttp/fasthttputil @@ -174,7 +174,7 @@ go.opentelemetry.io/proto/otlp/collector/metrics/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/metrics/v1 go.opentelemetry.io/proto/otlp/resource/v1 -# go.step.sm/crypto v0.54.2 +# go.step.sm/crypto v0.56.0 ## explicit; go 1.22 go.step.sm/crypto/fingerprint go.step.sm/crypto/internal/bcrypt_pbkdf @@ -185,7 +185,7 @@ go.step.sm/crypto/keyutil go.step.sm/crypto/pemutil go.step.sm/crypto/randutil go.step.sm/crypto/x25519 -# golang.org/x/crypto v0.29.0 +# golang.org/x/crypto v0.31.0 ## explicit; go 1.20 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -201,7 +201,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf # golang.org/x/mod v0.22.0 ## explicit; go 1.22.0 golang.org/x/mod/semver -# golang.org/x/net v0.31.0 +# golang.org/x/net v0.33.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -213,15 +213,15 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/sync v0.9.0 +# golang.org/x/sync v0.10.0 ## explicit; go 1.18 golang.org/x/sync/errgroup -# golang.org/x/sys v0.27.0 +# golang.org/x/sys v0.28.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.20.0 +# golang.org/x/text v0.21.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -243,7 +243,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.27.0 +# golang.org/x/tools v0.28.0 ## explicit; go 1.22.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/astutil @@ -265,14 +265,14 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 +# google.golang.org/genproto/googleapis/api v0.0.0-20241223144023-3abc09e42ca8 ## explicit; go 1.21 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.68.0-dev -## 
explicit; go 1.21 +# google.golang.org/grpc v1.69.2 +## explicit; go 1.22 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -280,6 +280,8 @@ google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst +google.golang.org/grpc/balancer/pickfirst/internal +google.golang.org/grpc/balancer/pickfirst/pickfirstleaf google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -329,7 +331,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.35.2 +# google.golang.org/protobuf v1.36.1 ## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson @@ -353,6 +355,7 @@ google.golang.org/protobuf/internal/impl google.golang.org/protobuf/internal/msgfmt google.golang.org/protobuf/internal/order google.golang.org/protobuf/internal/pragma +google.golang.org/protobuf/internal/protolazy google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version