diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0854d298e413..d9bfa6e1e7c0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) +If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements @@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly. is a great place to start. These issues are well-documented and usually can be resolved with a single pull request. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number @@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly. proposal](https://github.com/grpc/proposal). - Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a github issue if it exists. + and **why** it was made. Link to a GitHub issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments diff --git a/Documentation/encoding.md b/Documentation/encoding.md index dd49f55e5d44..64358f10a7b0 100644 --- a/Documentation/encoding.md +++ b/Documentation/encoding.md @@ -83,7 +83,7 @@ performing compression and decompression. A `Compressor` contains code to compress and decompress by wrapping `io.Writer`s and `io.Reader`s, respectively. (The form of `Compress` and `Decompress` were chosen to most closely match Go's standard package -[implementations](https://golang.org/pkg/compress/) of compressors. Like +[implementations](https://golang.org/pkg/compress/) of compressors). Like `Codec`s, `Compressor`s are registered by name into a global registry maintained in the `encoding` package. diff --git a/Documentation/grpc-auth-support.md b/Documentation/grpc-auth-support.md index 1362eeaa4ae2..cedcaa6fa026 100644 --- a/Documentation/grpc-auth-support.md +++ b/Documentation/grpc-auth-support.md @@ -1,6 +1,6 @@ # Authentication -As outlined in the [gRPC authentication guide](https://grpc.io/docs/guides/auth.html) there are a number of different mechanisms for asserting identity between an client and server. 
We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it.
+As outlined in the [gRPC authentication guide](https://grpc.io/docs/guides/auth.html), there are a number of different mechanisms for asserting identity between a client and server. We'll present some code samples here demonstrating how to provide TLS encryption and identity assertions, as well as passing OAuth2 tokens to services that support it.
 # Enabling TLS on a gRPC client
diff --git a/balancer/balancer.go b/balancer/balancer.go
index b181f386a1ba..8d125d2aa207 100644
--- a/balancer/balancer.go
+++ b/balancer/balancer.go
@@ -130,7 +130,7 @@ type SubConn interface {
 // UpdateAddresses updates the addresses used in this SubConn.
 // gRPC checks if currently-connected address is still in the new list.
 // If it's in the list, the connection will be kept.
- // If it's not in the list, the connection will gracefully closed, and
+ // If it's not in the list, the connection will gracefully close, and
 // a new connection will be created.
 //
 // This will trigger a state transition for the SubConn.
diff --git a/balancer/base/balancer.go b/balancer/base/balancer.go
index 2b87bd79c757..d5ed172ae695 100644
--- a/balancer/base/balancer.go
+++ b/balancer/base/balancer.go
@@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
 }
 }
 // If resolver state contains no addresses, return an error so ClientConn
- // will trigger re-resolve. Also records this as an resolver error, so when
+ // will trigger re-resolve. Also records this as a resolver error, so when
 // the overall state turns transient failure, the error message will have
 // the zero address information.
 if len(s.ResolverState.Addresses) == 0 {
diff --git a/balancer/endpointsharding/endpointsharding.go b/balancer/endpointsharding/endpointsharding.go
index 43d960df0af5..78996405aeaf 100644
--- a/balancer/endpointsharding/endpointsharding.go
+++ b/balancer/endpointsharding/endpointsharding.go
@@ -133,7 +133,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState
 // Return first error found, and always commit full processing of
 // updating children. If desired to process more specific errors
 // across all endpoints, caller should make these specific
- // validations, this is a current limitation for simplicities sake.
+ // validations; this is a current limitation for simplicity's sake.
 ret = err
 }
 }
diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go
index c09876274131..0770b88e96d5 100644
--- a/balancer/grpclb/grpclb.go
+++ b/balancer/grpclb/grpclb.go
@@ -197,7 +197,7 @@ type lbBalancer struct {
 // manualResolver is used in the remote LB ClientConn inside grpclb. When
 // resolved address updates are received by grpclb, filtered updates will be
- // send to remote LB ClientConn through this resolver.
+ // sent to remote LB ClientConn through this resolver.
 manualResolver *manual.Resolver
 // The ClientConn to talk to the remote balancer.
 ccRemoteLB *remoteBalancerCCWrapper
diff --git a/balancer/rls/internal/keys/builder.go b/balancer/rls/internal/keys/builder.go
index cc5ce510ad90..7deb7dc7a249 100644
--- a/balancer/rls/internal/keys/builder.go
+++ b/balancer/rls/internal/keys/builder.go
@@ -189,7 +189,7 @@ func (b builder) Equal(a builder) bool {
 // Protobuf serialization maintains the order of repeated fields.
Matchers
 // are specified as a repeated field inside the KeyBuilder proto. If the
 // order changes, it means that the order in the protobuf changed. We report
- // this case as not being equal even though the builders could possible be
+ // this case as not being equal even though the builders could possibly be
 // functionally equal.
 for i, bMatcher := range b.headerKeys {
 aMatcher := a.headerKeys[i]
diff --git a/balancer/weightedroundrobin/balancer_test.go b/balancer/weightedroundrobin/balancer_test.go
index 6ffddc0a7739..379a2fa07f59 100644
--- a/balancer/weightedroundrobin/balancer_test.go
+++ b/balancer/weightedroundrobin/balancer_test.go
@@ -212,7 +212,7 @@ func (s) TestBalancer_OneAddress(t *testing.T) {
 // balancer startup case which triggers the first picker and scheduler update
 // before any load reports are received.
 //
-// Note that this test and others, metrics emission asssertions are a snapshot
+// Note that in this test and others, metrics emission assertions are a snapshot
 // of the most recently emitted metrics. This is due to the nondeterminism of
 // scheduler updates with respect to test bodies, so the assertions made are
 // from the most recently synced state of the system (picker/scheduler) from the
diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go
index 1ff88294acef..07c8c9db6651 100644
--- a/benchmark/benchmark.go
+++ b/benchmark/benchmark.go
@@ -276,7 +276,7 @@ func StartServer(info ServerInfo, opts ...grpc.ServerOption) func() {
 }
 }
-// DoUnaryCall performs an unary RPC with given stub and request and response sizes.
+// DoUnaryCall performs a unary RPC with given stub and request and response sizes.
 func DoUnaryCall(tc testgrpc.BenchmarkServiceClient, reqSize, respSize int) error {
 pl := NewPayload(testpb.PayloadType_COMPRESSABLE, reqSize)
 req := &testpb.SimpleRequest{
diff --git a/benchmark/benchresult/main.go b/benchmark/benchresult/main.go
index 5bd9ce6ff891..89598555f080 100644
--- a/benchmark/benchresult/main.go
+++ b/benchmark/benchresult/main.go
@@ -21,7 +21,7 @@ To format the benchmark result:
 go run benchmark/benchresult/main.go resultfile
-To see the performance change based on a old result:
+To see the performance change based on an old result:
 go run benchmark/benchresult/main.go resultfile_old resultfile
diff --git a/clientconn.go b/clientconn.go
index 9c8850e3fdd5..e5615e71ed43 100644
--- a/clientconn.go
+++ b/clientconn.go
@@ -1292,7 +1292,7 @@ func (ac *addrConn) resetTransportAndUnlock() {
 ac.mu.Unlock()
 }
-// tryAllAddrs tries to creates a connection to the addresses, and stop when at
+// tryAllAddrs tries to create a connection to the addresses, and stops at
 // the first successful one. It returns an error if no address was successfully
 // connected, or updates ac appropriately with the new transport.
 func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
diff --git a/clientconn_test.go b/clientconn_test.go
index 468f8752346a..0cb09001da04 100644
--- a/clientconn_test.go
+++ b/clientconn_test.go
@@ -326,7 +326,7 @@ func (s) TestCloseConnectionWhenServerPrefaceNotReceived(t *testing.T) {
 case <-timer.C:
 t.Fatalf("Client didn't make another connection request in time.")
 }
- // Make sure the connection stays alive for sometime.
+ // Make sure the connection stays alive for some time.
time.Sleep(time.Second) atomic.StoreUint32(&over, 1) client.Close() diff --git a/credentials/xds/xds_client_test.go b/credentials/xds/xds_client_test.go index 0cf8b3fbcbca..3d07c0b567ca 100644 --- a/credentials/xds/xds_client_test.go +++ b/credentials/xds/xds_client_test.go @@ -117,7 +117,7 @@ func (ts *testServer) start() error { return nil } -// handleconn accepts a new raw connection, and invokes the test provided +// handleConn accepts a new raw connection, and invokes the test provided // handshake function to perform TLS handshake, and returns the result on the // `hsResult` channel. func (ts *testServer) handleConn() { diff --git a/dialoptions.go b/dialoptions.go index 2b285beee376..518692c3afb8 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -436,7 +436,7 @@ func WithTimeout(d time.Duration) DialOption { // option to true from the Control field. For a concrete example of how to do // this, see internal.NetDialerWithTCPKeepalive(). // -// For more information, please see [issue 23459] in the Go github repo. +// For more information, please see [issue 23459] in the Go GitHub repo. // // [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { diff --git a/encoding/encoding_test.go b/encoding/encoding_test.go index 9ac59e461633..19769146f91b 100644 --- a/encoding/encoding_test.go +++ b/encoding/encoding_test.go @@ -114,7 +114,7 @@ func (c *errProtoCodec) Name() string { func (s) TestEncodeDoesntPanicOnServer(t *testing.T) { grpctest.TLogger.ExpectError("grpc: server failed to encode response") - // Create an codec that errors when encoding messages. + // Create a codec that errors when encoding messages. encodingErr := errors.New("encoding failed") ec := &errProtoCodec{name: t.Name(), encodingErr: encodingErr} @@ -150,7 +150,7 @@ func (s) TestEncodeDoesntPanicOnServer(t *testing.T) { // Tests the case where decoding fails on the server. Verifies that there is // no panic and that the decoding error is propagated to the client. func (s) TestDecodeDoesntPanicOnServer(t *testing.T) { - // Create an codec that errors when decoding messages. + // Create a codec that errors when decoding messages. decodingErr := errors.New("decoding failed") ec := &errProtoCodec{name: t.Name(), decodingErr: decodingErr} @@ -192,7 +192,7 @@ func (s) TestEncodeDoesntPanicOnClient(t *testing.T) { backend := stubserver.StartTestService(t, nil) defer backend.Stop() - // Create an codec that errors when encoding messages. + // Create a codec that errors when encoding messages. encodingErr := errors.New("encoding failed") ec := &errProtoCodec{name: t.Name(), encodingErr: encodingErr} @@ -228,7 +228,7 @@ func (s) TestDecodeDoesntPanicOnClient(t *testing.T) { backend := stubserver.StartTestService(t, nil) defer backend.Stop() - // Create an codec that errors when decoding messages. + // Create a codec that errors when decoding messages. decodingErr := errors.New("decoding failed") ec := &errProtoCodec{name: t.Name(), decodingErr: decodingErr} @@ -283,7 +283,7 @@ func (p *countingProtoCodec) Name() string { // Tests the case where ForceServerCodec option is used on the server. Verifies // that encoding and decoding happen once per RPC. func (s) TestForceServerCodec(t *testing.T) { - // Create an server with the counting proto codec. + // Create a server with the counting proto codec. 
codec := &countingProtoCodec{name: t.Name()} backend := stubserver.StartTestService(t, nil, grpc.ForceServerCodecV2(codec)) defer backend.Stop() diff --git a/examples/features/advancedtls/client/main.go b/examples/features/advancedtls/client/main.go index a4cd98ca1a61..f66707618a57 100644 --- a/examples/features/advancedtls/client/main.go +++ b/examples/features/advancedtls/client/main.go @@ -149,7 +149,7 @@ func makeCRLProvider(crlDirectory string) *advancedtls.FileWatcherCRLProvider { } // --- Custom Verification --- -func customVerificaitonSucceed(info *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) { +func customVerificationSucceed(info *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) { // Looks at info for what you care about as the custom verification implementer if info.ServerName != "localhost:50051" { return nil, fmt.Errorf("expected servername of localhost:50051, got %v", info.ServerName) @@ -157,7 +157,7 @@ func customVerificaitonSucceed(info *advancedtls.HandshakeVerificationInfo) (*ad return &advancedtls.PostHandshakeVerificationResults{}, nil } -func customVerificaitonFail(info *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) { +func customVerificationFail(info *advancedtls.HandshakeVerificationInfo) (*advancedtls.PostHandshakeVerificationResults, error) { // Looks at info for what you care about as the custom verification implementer if info.ServerName != "ExampleDesignedToFail" { return nil, fmt.Errorf("expected servername of ExampleDesignedToFail, got %v", info.ServerName) @@ -189,7 +189,7 @@ func runClientWithCustomVerification(credsDirectory string, port string) { }, // Tell the client to verify the server cert VerificationType: advancedtls.CertVerification, - AdditionalPeerVerification: customVerificaitonSucceed, + AdditionalPeerVerification: customVerificationSucceed, } clientTLSCreds, err := advancedtls.NewClientCreds(options) @@ -213,7 +213,7 @@ func runClientWithCustomVerification(credsDirectory string, port string) { }, // Tell the client to verify the server cert VerificationType: advancedtls.CertVerification, - AdditionalPeerVerification: customVerificaitonFail, + AdditionalPeerVerification: customVerificationFail, } clientTLSCreds, err := advancedtls.NewClientCreds(options) diff --git a/examples/features/advancedtls/creds/openssl-ca.cnf b/examples/features/advancedtls/creds/openssl-ca.cnf index 64d3de014076..31e0b3919bae 100644 --- a/examples/features/advancedtls/creds/openssl-ca.cnf +++ b/examples/features/advancedtls/creds/openssl-ca.cnf @@ -1,5 +1,5 @@ base_dir = . -certificate = $base_dir/ca_cert.pem # The CA certifcate +certificate = $base_dir/ca_cert.pem # The CA certificate private_key = $base_dir/ca_key.pem # The CA private key new_certs_dir = $base_dir # Location for new certs after signing database = $base_dir/index.txt # Database index file diff --git a/examples/features/advancedtls/openssl-ca.cnf b/examples/features/advancedtls/openssl-ca.cnf index 64d3de014076..31e0b3919bae 100644 --- a/examples/features/advancedtls/openssl-ca.cnf +++ b/examples/features/advancedtls/openssl-ca.cnf @@ -1,5 +1,5 @@ base_dir = . 
-certificate = $base_dir/ca_cert.pem # The CA certifcate +certificate = $base_dir/ca_cert.pem # The CA certificate private_key = $base_dir/ca_key.pem # The CA private key new_certs_dir = $base_dir # Location for new certs after signing database = $base_dir/index.txt # Database index file diff --git a/examples/features/csm_observability/client/main.go b/examples/features/csm_observability/client/main.go index 8ba1e863ab8d..20b357c2faad 100644 --- a/examples/features/csm_observability/client/main.go +++ b/examples/features/csm_observability/client/main.go @@ -63,7 +63,7 @@ func main() { defer cc.Close() c := echo.NewEchoClient(cc) - // Make a RPC every second. This should trigger telemetry to be emitted from + // Make an RPC every second. This should trigger telemetry to be emitted from // the client and the server. for { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) diff --git a/examples/features/encryption/ALTS/client/main.go b/examples/features/encryption/ALTS/client/main.go index 942d824b5736..b0778ec6bb9a 100644 --- a/examples/features/encryption/ALTS/client/main.go +++ b/examples/features/encryption/ALTS/client/main.go @@ -56,7 +56,7 @@ func main() { } defer conn.Close() - // Make a echo client and send an RPC. + // Make an echo client and send an RPC. rgc := ecpb.NewEchoClient(conn) callUnaryEcho(rgc, "hello world") } diff --git a/examples/features/encryption/TLS/client/main.go b/examples/features/encryption/TLS/client/main.go index 8dde2feb6aa5..27e978065885 100644 --- a/examples/features/encryption/TLS/client/main.go +++ b/examples/features/encryption/TLS/client/main.go @@ -60,7 +60,7 @@ func main() { } defer conn.Close() - // Make a echo client and send an RPC. + // Make an echo client and send an RPC. rgc := ecpb.NewEchoClient(conn) callUnaryEcho(rgc, "hello world") } diff --git a/examples/features/interceptor/client/main.go b/examples/features/interceptor/client/main.go index 12b20d26bc54..22150c404670 100644 --- a/examples/features/interceptor/client/main.go +++ b/examples/features/interceptor/client/main.go @@ -159,7 +159,7 @@ func main() { } defer conn.Close() - // Make a echo client and send RPCs. + // Make an echo client and send RPCs. rgc := ecpb.NewEchoClient(conn) callUnaryEcho(rgc, "hello world") callBidiStreamingEcho(rgc) diff --git a/examples/features/opentelemetry/client/main.go b/examples/features/opentelemetry/client/main.go index 071d7ba63a79..6b0ee92eb0da 100644 --- a/examples/features/opentelemetry/client/main.go +++ b/examples/features/opentelemetry/client/main.go @@ -60,7 +60,7 @@ func main() { defer cc.Close() c := echo.NewEchoClient(cc) - // Make a RPC every second. This should trigger telemetry to be emitted from + // Make an RPC every second. This should trigger telemetry to be emitted from // the client and the server. for { r, err := c.UnaryEcho(ctx, &echo.EchoRequest{Message: "this is examples/opentelemetry"}) diff --git a/examples/gotutorial.md b/examples/gotutorial.md index 69a6632b7724..b32649e6acb0 100644 --- a/examples/gotutorial.md +++ b/examples/gotutorial.md @@ -6,7 +6,7 @@ This tutorial provides a basic Go programmer's introduction to working with gRPC - Generate server and client code using the protocol buffer compiler. - Use the Go gRPC API to write a simple client and server for your service. -It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). 
Note that the example in this tutorial uses the proto3 version of the protocol buffers language, you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers Github repository.
+It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language; you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers GitHub repository.
 This isn't a comprehensive guide to using gRPC in Go: more reference documentation is coming soon.
diff --git a/gcp/observability/config.go b/gcp/observability/config.go
index ae7ea8b6983c..4ef3487abe86 100644
--- a/gcp/observability/config.go
+++ b/gcp/observability/config.go
@@ -245,8 +245,8 @@ type cloudLogging struct {
 type cloudMonitoring struct{}
 type cloudTrace struct {
- // SamplingRate is the global setting that controls the probability of a RPC
- // being traced. For example, 0.05 means there is a 5% chance for a RPC to
+ // SamplingRate is the global setting that controls the probability of an RPC
+ // being traced. For example, 0.05 means there is a 5% chance for an RPC to
 // be traced, 1.0 means trace every call, 0 means don’t start new traces. By
 // default, the sampling_rate is 0.
 SamplingRate float64 `json:"sampling_rate,omitempty"`
diff --git a/internal/balancer/gracefulswitch/gracefulswitch_test.go b/internal/balancer/gracefulswitch/gracefulswitch_test.go
index fb48d2aee6a3..5e6c53c74534 100644
--- a/internal/balancer/gracefulswitch/gracefulswitch_test.go
+++ b/internal/balancer/gracefulswitch/gracefulswitch_test.go
@@ -87,7 +87,7 @@ func (s) TestSuccessfulFirstUpdate(t *testing.T) {
 // TestTwoBalancersSameType tests the scenario where there is a graceful switch
 // load balancer setup with a current and pending load balancer of the same
 // type. Any ClientConn update should be forwarded to the current lb if there is
-// a current lb and no pending lb, and the only the pending lb if the graceful
+// a current lb and no pending lb, and only the pending lb if the graceful
 // switch balancer contains both a current lb and a pending lb. The pending load
 // balancer should also swap into current whenever it updates with a
 // connectivity state other than CONNECTING.
@@ -725,7 +725,7 @@ func (s) TestUpdateSubConnStateRace(t *testing.T) {
 }
 // TestInlineCallbackInBuild tests the scenario where a balancer calls back into
-// the balancer.ClientConn API inline from it's build function.
+// the balancer.ClientConn API inline from its build function.
 func (s) TestInlineCallbackInBuild(t *testing.T) {
 tcc, gsb := setup(t)
 // This build call should cause all of the inline updates to forward to the
diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go
index e602bf660bdd..8d22c9ac587e 100644
--- a/internal/balancergroup/balancergroup_test.go
+++ b/internal/balancergroup/balancergroup_test.go
@@ -168,7 +168,7 @@ func (s) TestBalancerGroup_start_close(t *testing.T) {
 // The callback will try to hold the same lock again, which will cause a
 // deadlock.
 //
-// This test starts the balancer group with a test balancer, will updates picker
+// This test starts the balancer group with a test balancer, which updates the picker
 // whenever it gets an address update. It's expected that start() doesn't block
 // because of deadlock.
 func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) {
diff --git a/internal/grpcsync/callback_serializer.go b/internal/grpcsync/callback_serializer.go
index 19b9d639275a..8e8e861280a0 100644
--- a/internal/grpcsync/callback_serializer.go
+++ b/internal/grpcsync/callback_serializer.go
@@ -53,7 +53,7 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
 return cs
 }
-// TrySchedule tries to schedules the provided callback function f to be
+// TrySchedule tries to schedule the provided callback function f to be
 // executed in the order it was added. This is a best-effort operation. If the
 // context passed to NewCallbackSerializer was canceled before this method is
 // called, the callback will not be scheduled.
diff --git a/internal/grpcutil/method.go b/internal/grpcutil/method.go
index ec62b4775e5b..683d1955c6a1 100644
--- a/internal/grpcutil/method.go
+++ b/internal/grpcutil/method.go
@@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) {
 }
 // baseContentType is the base content-type for gRPC. This is a valid
-// content-type on it's own, but can also include a content-subtype such as
+// content-type on its own, but can also include a content-subtype such as
 // "proto" as a suffix after "+" or ";". See
 // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
 // for more details.
diff --git a/internal/idle/idle.go b/internal/idle/idle.go
index fe49cb74c55a..d91cc30824fb 100644
--- a/internal/idle/idle.go
+++ b/internal/idle/idle.go
@@ -225,7 +225,7 @@ func (m *Manager) ExitIdleMode() error {
 // came in and OnCallBegin() noticed that the calls count is negative.
 // - Channel is in idle mode, and multiple new RPCs come in at the same
 // time, all of them notice a negative calls count in OnCallBegin and get
- // here. The first one to get the lock would got the channel to exit idle.
+ // here. The first one to get the lock would get the channel to exit idle.
 // - Channel is not in idle mode, and the user calls Connect which calls
 // m.ExitIdleMode.
 //
diff --git a/internal/idle/idle_test.go b/internal/idle/idle_test.go
index a4b0e42b8582..c2645bb95c05 100644
--- a/internal/idle/idle_test.go
+++ b/internal/idle/idle_test.go
@@ -138,7 +138,7 @@ func (s) TestManager_Enabled_TimerFires(t *testing.T) {
 defer mgr.Close()
 mgr.ExitIdleMode()
- // Ensure that the timer callback fires within a appropriate amount of time.
+ // Ensure that the timer callback fires within an appropriate amount of time.
 select {
 case <-callbackCh:
 case <-time.After(2 * defaultTestIdleTimeout):
@@ -306,7 +306,7 @@ const (
 stateActiveRPCs
 )
-// racyIdlnessEnforcer is a test idleness enforcer used specifically to test the
+// racyEnforcer is a test idleness enforcer used specifically to test the
 // race between idle timeout and incoming RPCs.
 type racyEnforcer struct {
 t *testing.T
diff --git a/internal/internal.go b/internal/internal.go
index 7aae9240ffc0..a4d4c19d7ad3 100644
--- a/internal/internal.go
+++ b/internal/internal.go
@@ -235,7 +235,7 @@ var (
 //
 // The implementation is expected to create a health checking RPC stream by
 // calling newStream(), watch for the health status of serviceName, and report
-// it's health back by calling setConnectivityState().
+// its health back by calling setConnectivityState().
 //
 // The health checking protocol is defined at:
 // https://github.com/grpc/grpc/blob/master/doc/health-checking.md
diff --git a/internal/leakcheck/leakcheck.go b/internal/leakcheck/leakcheck.go
index d3b41bd320fc..90830848830b 100644
--- a/internal/leakcheck/leakcheck.go
+++ b/internal/leakcheck/leakcheck.go
@@ -273,7 +273,7 @@ func CheckGoroutines(logger Logger, timeout time.Duration) {
 }
 }
-// LeakChecker captures an Logger and is returned by NewLeakChecker as a
+// LeakChecker captures a Logger and is returned by NewLeakChecker as a
 // convenient method to set up leak check tests in a unit test.
 type LeakChecker struct {
 logger Logger
diff --git a/internal/profiling/buffer/buffer.go b/internal/profiling/buffer/buffer.go
index 27c8a2003cb1..22c86295774d 100644
--- a/internal/profiling/buffer/buffer.go
+++ b/internal/profiling/buffer/buffer.go
@@ -48,7 +48,7 @@ type queue struct {
 written uint32
 }
-// Allocates and returns a new *queue. size needs to be a exponent of two.
+// Allocates and returns a new *queue. size needs to be an exponent of two.
 func newQueue(size uint32) *queue {
 return &queue{
 arr: make([]unsafe.Pointer, size),
@@ -103,7 +103,7 @@ func (qp *queuePair) switchQueues() *queue {
 // by other exponents of two, we use floorCPUCount number of queuePairs within
 // each CircularBuffer.
 //
-// Floor of the number of CPUs (and not the ceiling) was found to the be the
+// Floor of the number of CPUs (and not the ceiling) was found to be the
 // optimal number through experiments.
 func floorCPUCount() uint32 {
 floorExponent := bits.Len32(uint32(runtime.NumCPU())) - 1
@@ -129,7 +129,7 @@ var numCircularBufferPairs = floorCPUCount()
 type CircularBuffer struct {
 drainMutex sync.Mutex
 qp []*queuePair
- // qpn is an monotonically incrementing counter that's used to determine
+ // qpn is a monotonically incrementing counter that's used to determine
 // which queuePair a Push operation should write to. This approach's
 // performance was found to be better than writing to a random queue.
 qpn uint32
diff --git a/internal/profiling/profiling.go b/internal/profiling/profiling.go
index 58f71423459e..f8ae14f048b5 100644
--- a/internal/profiling/profiling.go
+++ b/internal/profiling/profiling.go
@@ -189,7 +189,7 @@ func (stat *Stat) AppendTimer(timer *Timer) {
 var statsInitialized int32
 // Stats for the last defaultStreamStatsBufsize RPCs will be stored in memory.
-// This is can be configured by the registering server at profiling service
+// This can be configured by the registering server at profiling service
 // initialization with google.golang.org/grpc/profiling/service.ProfilingConfig
 const defaultStreamStatsSize uint32 = 16 << 10
diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go
index 4552db16b028..8691698ef223 100644
--- a/internal/resolver/dns/dns_resolver.go
+++ b/internal/resolver/dns/dns_resolver.go
@@ -177,7 +177,7 @@ type dnsResolver struct {
 // finished. Otherwise, data race will be possible. [Race Example] in
 // dns_resolver_test we replace the real lookup functions with mocked ones to
 // facilitate testing. If Close() doesn't wait for watcher() goroutine
- // finishes, race detector sometimes will warns lookup (READ the lookup
+ // to finish, the race detector sometimes warns that lookup (READ the lookup
 // function pointers) inside watcher() goroutine has data race with
 // replaceNetFunc (WRITE the lookup function pointers).
 wg sync.WaitGroup
diff --git a/internal/transport/grpchttp2/http2bridge.go b/internal/transport/grpchttp2/http2bridge.go
index 31feee11c69e..73b6eeabc611 100644
--- a/internal/transport/grpchttp2/http2bridge.go
+++ b/internal/transport/grpchttp2/http2bridge.go
@@ -209,7 +209,7 @@ func (fr *FramerBridge) WritePing(ack bool, data [8]byte) error {
 return fr.framer.WritePing(ack, data)
 }
-// WriteGoAway writes a GoAway Frame to the unerlying writer.
+// WriteGoAway writes a GoAway Frame to the underlying writer.
 func (fr *FramerBridge) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
 return fr.framer.WriteGoAway(maxStreamID, http2.ErrCode(code), debugData)
 }
diff --git a/internal/xds/matcher/matcher_header.go b/internal/xds/matcher/matcher_header.go
index 01433f4122a2..1893526c539c 100644
--- a/internal/xds/matcher/matcher_header.go
+++ b/internal/xds/matcher/matcher_header.go
@@ -222,7 +222,7 @@ type HeaderContainsMatcher struct {
 // NewHeaderContainsMatcher returns a new HeaderContainsMatcher. key is the HTTP
 // Header key to match on, and contains is the value that the header should
-// should contain for a successful match. An empty contains string does not
+// contain for a successful match. An empty contains string does not
 // work, use HeaderPresentMatcher in that case.
 func NewHeaderContainsMatcher(key string, contains string, invert bool) *HeaderContainsMatcher {
 return &HeaderContainsMatcher{key: key, contains: contains, invert: invert}
diff --git a/interop/test_utils.go b/interop/test_utils.go
index cd84e007c6e2..71d0b0f060be 100644
--- a/interop/test_utils.go
+++ b/interop/test_utils.go
@@ -404,7 +404,7 @@ func DoPerRPCCreds(ctx context.Context, tc testgrpc.TestServiceClient, serviceAc
 }
 }
-// DoGoogleDefaultCredentials performs an unary RPC with google default credentials
+// DoGoogleDefaultCredentials performs a unary RPC with Google default credentials
 func DoGoogleDefaultCredentials(ctx context.Context, tc testgrpc.TestServiceClient, defaultServiceAccount string) {
 pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
 req := &testpb.SimpleRequest{
@@ -423,7 +423,7 @@ func DoGoogleDefaultCredentials(ctx context.Context, tc testgrpc.TestServiceClie
 }
 }
-// DoComputeEngineChannelCredentials performs an unary RPC with compute engine channel credentials
+// DoComputeEngineChannelCredentials performs a unary RPC with Compute Engine channel credentials
 func DoComputeEngineChannelCredentials(ctx context.Context, tc testgrpc.TestServiceClient, defaultServiceAccount string) {
 pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
 req := &testpb.SimpleRequest{
diff --git a/mem/buffers_test.go b/mem/buffers_test.go
index 72156becb012..c17995745209 100644
--- a/mem/buffers_test.go
+++ b/mem/buffers_test.go
@@ -199,7 +199,7 @@ func (s) TestBuffer_RefAfterFree(t *testing.T) {
 buf := newBuffer([]byte("abcd"), mem.NopBufferPool{})
 buf.Ref()
- // This first call should not panc and bring the ref counter down to 1
+ // This first call should not panic, and brings the ref counter down to 1
 buf.Free()
 // This second call actually frees the buffer
 buf.Free()
diff --git a/security/advancedtls/crl.go b/security/advancedtls/crl.go
index bf54856bc427..b6d49bc70474 100644
--- a/security/advancedtls/crl.go
+++ b/security/advancedtls/crl.go
@@ -401,7 +401,7 @@ func crlPemToDer(crlBytes []byte) []byte {
 // extractCRLIssuer extracts the raw ASN.1 encoding of the CRL issuer.
Due to the design of
 // pkix.CertificateList and pkix.RDNSequence, it is not possible to reliably marshal the
-// parsed Issuer to it's original raw encoding.
+// parsed Issuer to its original raw encoding.
 func extractCRLIssuer(crlBytes []byte) ([]byte, error) {
 if bytes.HasPrefix(crlBytes, crlPemPrefix) {
 crlBytes = crlPemToDer(crlBytes)
diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go
index 1f223e2515f5..d4cd4411f5bc 100644
--- a/stats/opencensus/e2e_test.go
+++ b/stats/opencensus/e2e_test.go
@@ -1060,7 +1060,7 @@ func (s) TestAllMetricsOneFunction(t *testing.T) {
 // TestOpenCensusTags tests this instrumentation code's ability to propagate
 // OpenCensus tags across the wire. It also tests the server stats handler's
 // functionality of adding the server method tag for the application to see. The
-// test makes an Unary RPC without a tag map and with a tag map, and expects to
+// test makes a unary RPC without a tag map and with a tag map, and expects to
 // see a tag map at the application layer with server method tag in the first
 // case, and a tag map at the application layer with the populated tag map plus
 // server method tag in second case.
diff --git a/stats/opentelemetry/csm/observability_test.go b/stats/opentelemetry/csm/observability_test.go
index 2b092b0e1e58..7b498cb50879 100644
--- a/stats/opentelemetry/csm/observability_test.go
+++ b/stats/opentelemetry/csm/observability_test.go
@@ -45,7 +45,7 @@ import (
 // the bootstrap env var to a bootstrap file with a nodeID provided. It sets CSM
 // Env Vars as well, and mocks the resource detector's returned attribute set to
 // simulate the environment. It registers a cleanup function on the provided t
-// to restore the environment to it's original state.
+// to restore the environment to its original state.
 func setupEnv(t *testing.T, resourceDetectorEmissions map[string]string, nodeID, csmCanonicalServiceName, csmWorkloadName string) {
 bootstrapContents := e2e.DefaultBootstrapContents(t, nodeID, "xds_server_uri")
 testutils.CreateBootstrapFileForTesting(t, bootstrapContents)
diff --git a/xds/csds/csds_e2e_test.go b/xds/csds/csds_e2e_test.go
index 33e10ba0f53f..3c838afb67fc 100644
--- a/xds/csds/csds_e2e_test.go
+++ b/xds/csds/csds_e2e_test.go
@@ -147,7 +147,7 @@ func (w *blockingListenerWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDo
 writeOnDone(w.testCtxDone, w.onDoneCh, onDone)
 }
-// writeOnDone attempts to writes the onDone callback on the onDone channel. It
+// writeOnDone attempts to write the onDone callback on the onDone channel. It
 // returns when it can successfully write to the channel or when the test is
 // done, which is signalled by testCtxDone being closed.
 func writeOnDone(testCtxDone <-chan struct{}, onDoneCh chan xdsresource.OnDoneFunc, onDone xdsresource.OnDoneFunc) {
@@ -407,7 +407,7 @@ func (s) TestCSDS(t *testing.T) {
 //
 // This test does a bunch of similar things to the previous test, but has
 // reduced complexity because of having to deal with a single resource type.
-// This makes is possible to test the NACKing a resource (which results in
+// This makes it possible to test NACKing a resource (which results in
 // continuous resending of the resource by the go-control-plane management
 // server), in an easier and less flaky way.
func (s) TestCSDS_NACK(t *testing.T) { diff --git a/xds/internal/balancer/cdsbalancer/aggregate_cluster_test.go b/xds/internal/balancer/cdsbalancer/aggregate_cluster_test.go index ded8c13448d8..36f406552266 100644 --- a/xds/internal/balancer/cdsbalancer/aggregate_cluster_test.go +++ b/xds/internal/balancer/cdsbalancer/aggregate_cluster_test.go @@ -66,7 +66,7 @@ func makeLogicalDNSClusterResource(name, dnsHost string, dnsPort uint32) *v3clus // Tests the case where the cluster resource requested by the cds LB policy is a // leaf cluster. The management server sends two updates for the same leaf // cluster resource. The test verifies that the load balancing configuration -// pushed to the cluster_resolver LB policy is contains the expected discovery +// pushed to the cluster_resolver LB policy contains the expected discovery // mechanism corresponding to the leaf cluster, on both occasions. func (s) TestAggregateClusterSuccess_LeafNode(t *testing.T) { tests := []struct { diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index 749945059b88..3f0c54e8f389 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -134,7 +134,7 @@ func (bb) ParseConfig(j json.RawMessage) (serviceconfig.LoadBalancingConfig, err // This will never occur, valid configuration is emitted from the xDS // Client. Validity is already checked in the xDS Client, however, this // double validation is present because Unmarshalling and Validating are - // coupled into one json.Unmarshal operation). We will switch this in + // coupled into one json.Unmarshal operation. We will switch this in // the future to two separate operations. return nil, fmt.Errorf("error unmarshalling xDS LB Policy: %v", err) } diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index 8740360eefdd..f62b8e6c8eb5 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -37,7 +37,7 @@ import ( const million = 1000000 -// priorityConfig is config for one priority. For example, if there an EDS and a +// priorityConfig is config for one priority. For example, if there's an EDS and a // DNS, the priority list will be [priorityConfig{EDS}, priorityConfig{DNS}]. // // Each priorityConfig corresponds to one discovery mechanism from the LBConfig @@ -171,7 +171,7 @@ func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.Endpoint } // Localities of length 0 is triggered by an NACK or resource-not-found - // error before update, or a empty localities list in a update. In either + // error before update, or an empty localities list in an update. In either // case want to create a priority, and send down empty address list, causing // TF for that priority. 
"If any discovery mechanism instance experiences an // error retrieving data, and it has not previously reported any results, it diff --git a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go index 4c125f344588..2d934ef3ae10 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go @@ -109,7 +109,7 @@ func setupAndDial(t *testing.T, bootstrapContents []byte) (*grpc.ClientConn, fun } // TestErrorFromParentLB_ConnectionError tests the case where the parent of the -// clusterresolver LB policy sends its a connection error. The parent policy, +// clusterresolver LB policy sends it a connection error. The parent policy, // CDS LB policy, sends a connection error when the ADS stream to the management // server breaks. The test verifies that there is no perceivable effect because // of this connection error, and that RPCs continue to work (because the LB @@ -193,7 +193,7 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { // notify the test about the following events: // - an EDS requested with the expected resource name is requested // - EDS resource is unrequested, i.e, an EDS request with no resource name - // is received, which indicates that we are not longer interested in that + // is received, which indicates that we are no longer interested in that // resource. edsResourceRequestedCh := make(chan struct{}, 1) edsResourceCanceledCh := make(chan struct{}, 1) diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index 8fddf0bb055a..c54424376d3a 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -596,7 +596,7 @@ func (s) TestEDS_ResourceRemoved(t *testing.T) { // notify the test about the following events: // - an EDS requested with the expected resource name is requested // - EDS resource is unrequested, i.e, an EDS request with no resource name - // is received, which indicates that we are not longer interested in that + // is received, which indicates that we are no longer interested in that // resource. edsResourceRequestedCh := make(chan struct{}, 1) edsResourceCanceledCh := make(chan struct{}, 1) @@ -847,7 +847,7 @@ func (s) TestEDS_ClusterResourceUpdates(t *testing.T) { t.Fatalf("Timeout when waiting for old EDS watch %q to be canceled and new one %q to be registered", edsServiceName, clusterName) } - // Make a RPC, and ensure that it gets routed to second backend, + // Make an RPC, and ensure that it gets routed to second backend, // corresponding to the cluster_name. for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go index 5bc64b86305c..3bcfba8732a3 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -77,7 +77,7 @@ type endpointsResolver interface { // discoveryMechanismKey is {type+resource_name}, it's used as the map key, so // that the same resource resolver can be reused (e.g. when there are two // mechanisms, both for the same EDS resource, but has different circuit -// breaking config. 
+// breaking config). type discoveryMechanismKey struct { typ DiscoveryMechanismType name string diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go index 53ba72c0813f..4ccff08b51ec 100644 --- a/xds/internal/balancer/outlierdetection/balancer.go +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -657,7 +657,7 @@ func (b *outlierDetectionBalancer) handleChildStateUpdate(u balancer.State) { func (b *outlierDetectionBalancer) handleLBConfigUpdate(u lbCfgUpdate) { lbCfg := u.lbCfg noopCfg := lbCfg.SuccessRateEjection == nil && lbCfg.FailurePercentageEjection == nil - // If the child has sent it's first update and this config flips the noop + // If the child has sent its first update and this config flips the noop // bit compared to the most recent picker update sent upward, then a new // picker with this updated bit needs to be forwarded upward. If a child // update was received during the suppression of child updates within diff --git a/xds/internal/balancer/outlierdetection/balancer_test.go b/xds/internal/balancer/outlierdetection/balancer_test.go index 953fb782f4ab..a80d4d9dee4f 100644 --- a/xds/internal/balancer/outlierdetection/balancer_test.go +++ b/xds/internal/balancer/outlierdetection/balancer_test.go @@ -561,7 +561,7 @@ type emptyChildConfig struct { } // TestChildBasicOperations tests basic operations of the Outlier Detection -// Balancer and it's interaction with it's child. The following scenarios are +// Balancer and its interaction with its child. The following scenarios are // tested, in a step by step fashion: // 1. The Outlier Detection Balancer receives it's first good configuration. The // balancer is expected to create a child and sent the child it's configuration. @@ -606,7 +606,7 @@ func (s) TestChildBasicOperations(t *testing.T) { od, tcc, _ := setup(t) // This first config update should cause a child to be built and forwarded - // it's first update. + // its first update. od.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ ChildPolicy: &iserviceconfig.BalancerConfig{ @@ -627,7 +627,7 @@ func (s) TestChildBasicOperations(t *testing.T) { } // This Update Client Conn State call should cause the first child balancer - // to close, and a new child to be created and also forwarded it's first + // to close, and a new child to be created and also forwarded its first // config update. od.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ @@ -654,7 +654,7 @@ func (s) TestChildBasicOperations(t *testing.T) { if _, err = closeCh.Receive(ctx); err != nil { t.Fatalf("timed out waiting for the first child balancer to be closed: %v", err) } - // Verify the second child balancer received it's first config update. + // Verify the second child balancer received its first config update. 
if _, err = ccsCh.Receive(ctx); err != nil { t.Fatalf("timed out waiting for UpdateClientConnState on the second child balancer: %v", err) } diff --git a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go index 2b98ee5ad161..6765d827a432 100644 --- a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go +++ b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go @@ -1596,11 +1596,11 @@ func (s) TestRingHash_TransientFailureSkipToAvailableReady(t *testing.T) { }) defer restartableServer2.Stop() - nonExistantBackends := makeNonExistentBackends(t, 2) + nonExistentBackends := makeNonExistentBackends(t, 2) const clusterName = "cluster" backends := []string{restartableServer1.Address, restartableServer2.Address} - backends = append(backends, nonExistantBackends...) + backends = append(backends, nonExistentBackends...) endpoints := endpointResource(t, clusterName, backends) cluster := e2e.ClusterResourceWithOptions(e2e.ClusterOptions{ ClusterName: clusterName, @@ -1862,11 +1862,11 @@ func (s) TestRingHash_SwitchToLowerPriorityAndThenBack(t *testing.T) { // so for only one subchannel at a time. func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *testing.T) { backends := startTestServiceBackends(t, 1) - nonExistantBackends := makeNonExistentBackends(t, 3) + nonExistentBackends := makeNonExistentBackends(t, 3) const clusterName = "cluster" - endpoints := endpointResource(t, clusterName, append(nonExistantBackends, backends...)) + endpoints := endpointResource(t, clusterName, append(nonExistentBackends, backends...)) cluster := e2e.ClusterResourceWithOptions(e2e.ClusterOptions{ ClusterName: clusterName, ServiceName: clusterName, @@ -1897,15 +1897,15 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes defer conn.Close() client := testgrpc.NewTestServiceClient(conn) - holdNonExistant0 := dialer.Hold(nonExistantBackends[0]) - holdNonExistant1 := dialer.Hold(nonExistantBackends[1]) - holdNonExistant2 := dialer.Hold(nonExistantBackends[2]) + holdNonExistent0 := dialer.Hold(nonExistentBackends[0]) + holdNonExistent1 := dialer.Hold(nonExistentBackends[1]) + holdNonExistent2 := dialer.Hold(nonExistentBackends[2]) holdGood := dialer.Hold(backends[0]) rpcCtx, rpcCancel := context.WithCancel(ctx) errCh := make(chan error, 1) go func() { - rpcCtx = metadata.NewOutgoingContext(rpcCtx, metadata.Pairs("address_hash", nonExistantBackends[0]+"_0")) + rpcCtx = metadata.NewOutgoingContext(rpcCtx, metadata.Pairs("address_hash", nonExistentBackends[0]+"_0")) _, err := client.EmptyCall(rpcCtx, &testpb.Empty{}) if status.Code(err) == codes.Canceled { errCh <- nil @@ -1916,7 +1916,7 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes // Wait for the RPC to trigger a connection attempt to the first address, // then cancel the RPC. No other connection attempts should be started yet. - if !holdNonExistant0.Wait(ctx) { + if !holdNonExistent0.Wait(ctx) { t.Fatalf("Timeout waiting for connection attempt to backend 0") } rpcCancel() @@ -1926,10 +1926,10 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes // Since the connection attempt to the first address is still blocked, no // other connection attempts should be started yet. 
- if holdNonExistant1.IsStarted() { + if holdNonExistent1.IsStarted() { t.Errorf("Got connection attempt to backend 1, expected no connection attempt.") } - if holdNonExistant2.IsStarted() { + if holdNonExistent2.IsStarted() { t.Errorf("Got connection attempt to backend 2, expected no connection attempt.") } if holdGood.IsStarted() { @@ -1939,15 +1939,15 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes // Allow the connection attempt to the first address to resume and wait for // the attempt for the second address. No other connection attempts should // be started yet. - holdNonExistant0Again := dialer.Hold(nonExistantBackends[0]) - holdNonExistant0.Resume() - if !holdNonExistant1.Wait(ctx) { + holdNonExistent0Again := dialer.Hold(nonExistentBackends[0]) + holdNonExistent0.Resume() + if !holdNonExistent1.Wait(ctx) { t.Fatalf("Timeout waiting for connection attempt to backend 1") } - if holdNonExistant0Again.IsStarted() { + if holdNonExistent0Again.IsStarted() { t.Errorf("Got connection attempt to backend 0 again, expected no connection attempt.") } - if holdNonExistant2.IsStarted() { + if holdNonExistent2.IsStarted() { t.Errorf("Got connection attempt to backend 2, expected no connection attempt.") } if holdGood.IsStarted() { @@ -1957,15 +1957,15 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes // Allow the connection attempt to the second address to resume and wait for // the attempt for the third address. No other connection attempts should // be started yet. - holdNonExistant1Again := dialer.Hold(nonExistantBackends[1]) - holdNonExistant1.Resume() - if !holdNonExistant2.Wait(ctx) { + holdNonExistent1Again := dialer.Hold(nonExistentBackends[1]) + holdNonExistent1.Resume() + if !holdNonExistent2.Wait(ctx) { t.Fatalf("Timeout waiting for connection attempt to backend 2") } - if holdNonExistant0Again.IsStarted() { + if holdNonExistent0Again.IsStarted() { t.Errorf("Got connection attempt to backend 0 again, expected no connection attempt.") } - if holdNonExistant1Again.IsStarted() { + if holdNonExistent1Again.IsStarted() { t.Errorf("Got connection attempt to backend 1 again, expected no connection attempt.") } if holdGood.IsStarted() { @@ -1975,18 +1975,18 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes // Allow the connection attempt to the third address to resume and wait // for the attempt for the final address. No other connection attempts // should be started yet. 
- holdNonExistant2Again := dialer.Hold(nonExistantBackends[2]) - holdNonExistant2.Resume() + holdNonExistent2Again := dialer.Hold(nonExistentBackends[2]) + holdNonExistent2.Resume() if !holdGood.Wait(ctx) { t.Fatalf("Timeout waiting for connection attempt to good backend") } - if holdNonExistant0Again.IsStarted() { + if holdNonExistent0Again.IsStarted() { t.Errorf("Got connection attempt to backend 0 again, expected no connection attempt.") } - if holdNonExistant1Again.IsStarted() { + if holdNonExistent1Again.IsStarted() { t.Errorf("Got connection attempt to backend 1 again, expected no connection attempt.") } - if holdNonExistant2Again.IsStarted() { + if holdNonExistent2Again.IsStarted() { t.Errorf("Got connection attempt to backend 2 again, expected no connection attempt.") } @@ -1997,13 +1997,13 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes testutils.AwaitState(ctx, t, conn, connectivity.Ready) // No other connection attempts should have been started - if holdNonExistant0Again.IsStarted() { + if holdNonExistent0Again.IsStarted() { t.Errorf("Got connection attempt to backend 0 again, expected no connection attempt.") } - if holdNonExistant1Again.IsStarted() { + if holdNonExistent1Again.IsStarted() { t.Errorf("Got connection attempt to backend 1 again, expected no connection attempt.") } - if holdNonExistant2Again.IsStarted() { + if holdNonExistent2Again.IsStarted() { t.Errorf("Got connection attempt to backend 2 again, expected no connection attempt.") } }