*: fix minor typos (grpc#7487)
* Fix typos

* Fix reflection tests
NathanBaulch authored and infovivek2020 committed Aug 18, 2024
1 parent 0716f4a commit 108e3d9
Showing 88 changed files with 135 additions and 135 deletions.
2 changes: 1 addition & 1 deletion SECURITY.md
@@ -1,3 +1,3 @@
# Security Policy

For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
2 changes: 1 addition & 1 deletion backoff/backoff.go
@@ -39,7 +39,7 @@ type Config struct {
MaxDelay time.Duration
}

// DefaultConfig is a backoff configuration with the default values specfied
// DefaultConfig is a backoff configuration with the default values specified
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// This should be useful for callers who want to configure backoff with
2 changes: 1 addition & 1 deletion balancer/grpclb/grpclb.go
@@ -219,7 +219,7 @@ type lbBalancer struct {
// All backends addresses, with metadata set to nil. This list contains all
// backend addresses in the same order and with the same duplicates as in
// serverlist. When generating picker, a SubConn slice with the same order
// but with only READY SCs will be gerenated.
// but with only READY SCs will be generated.
backendAddrsWithoutMetadata []resolver.Address
// Roundrobin functionalities.
state connectivity.State
2 changes: 1 addition & 1 deletion balancer/grpclb/grpclb_util_test.go
@@ -252,7 +252,7 @@ func (s) TestLBCache_ShutdownTimer_New_Race(t *testing.T) {
go func() {
for i := 0; i < 1000; i++ {
// Shutdown starts a timer with 1 ns timeout, the NewSubConn will
// race with with the timer.
// race with the timer.
sc.Shutdown()
sc, _ = ccc.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{})
}
2 changes: 1 addition & 1 deletion balancer/pickfirst/pickfirst.go
@@ -155,7 +155,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// Endpoints not set, process addresses until we migrate resolver
// emissions fully to Endpoints. The top channel does wrap emitted
// addresses with endpoints, however some balancers such as weighted
// target do not forwarrd the corresponding correct endpoints down/split
// target do not forward the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
addrs = state.ResolverState.Addresses
12 changes: 6 additions & 6 deletions balancer/rls/cache.go
@@ -47,7 +47,7 @@ type cacheEntry struct {
// headerData is received in the RLS response and is to be sent in the
// X-Google-RLS-Data header for matching RPCs.
headerData string
// expiryTime is the absolute time at which this cache entry entry stops
// expiryTime is the absolute time at which this cache entry stops
// being valid. When an RLS request succeeds, this is set to the current
// time plus the max_age field from the LB policy config.
expiryTime time.Time
@@ -223,7 +223,7 @@ func (dc *dataCache) resize(size int64) (backoffCancelled bool) {
backoffCancelled = true
}
}
dc.deleteAndcleanup(key, entry)
dc.deleteAndCleanup(key, entry)
}
dc.maxSize = size
return backoffCancelled
@@ -249,7 +249,7 @@ func (dc *dataCache) evictExpiredEntries() bool {
if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) {
continue
}
dc.deleteAndcleanup(key, entry)
dc.deleteAndCleanup(key, entry)
evicted = true
}
return evicted
@@ -339,23 +339,23 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) {
if !ok {
return
}
dc.deleteAndcleanup(key, entry)
dc.deleteAndCleanup(key, entry)
}

// deleteAndCleanup performs actions required at the time of deleting an entry
// from the data cache.
// - the entry is removed from the map of entries
// - current size of the data cache is update
// - the key is removed from the LRU
func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) {
func (dc *dataCache) deleteAndCleanup(key cacheKey, entry *cacheEntry) {
delete(dc.entries, key)
dc.currentSize -= entry.size
dc.keys.removeEntry(key)
}

func (dc *dataCache) stop() {
for key, entry := range dc.entries {
dc.deleteAndcleanup(key, entry)
dc.deleteAndCleanup(key, entry)
}
dc.shutdown.Fire()
}
2 changes: 1 addition & 1 deletion balancer/rls/control_channel_test.go
@@ -62,7 +62,7 @@ func (s) TestControlChannelThrottled(t *testing.T) {

select {
case <-rlsReqCh:
t.Fatal("RouteLookup RPC invoked when control channel is throtlled")
t.Fatal("RouteLookup RPC invoked when control channel is throttled")
case <-time.After(defaultTestShortTimeout):
}
}
2 changes: 1 addition & 1 deletion balancer/rls/internal/keys/builder.go
@@ -218,7 +218,7 @@ type matcher struct {
names []string
}

// Equal reports if m and are are equivalent headerKeys.
// Equal reports if m and a are equivalent headerKeys.
func (m matcher) Equal(a matcher) bool {
if m.key != a.key {
return false
2 changes: 1 addition & 1 deletion balancer/rls/internal/test/e2e/rls_child_policy.go
@@ -125,7 +125,7 @@ func (b *bal) Close() {

// run is a dummy goroutine to make sure that child policies are closed at the
// end of tests. If they are not closed, these goroutines will be picked up by
// the leakcheker and tests will fail.
// the leak checker and tests will fail.
func (b *bal) run() {
<-b.done.Done()
}
2 changes: 1 addition & 1 deletion balancer/rls/picker.go
@@ -190,7 +190,7 @@ func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info bala
state := (*balancer.State)(atomic.LoadPointer(&cpw.state))
// Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if
// it is the last one (which handles the case of delegating to the last
// child picker if all child polcies are in TRANSIENT_FAILURE).
// child picker if all child policies are in TRANSIENT_FAILURE).
if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 {
// Any header data received from the RLS server is stored in the
// cache entry and needs to be sent to the actual backend in the
2 changes: 1 addition & 1 deletion balancer/weightedtarget/weightedaggregator/aggregator.go
@@ -89,7 +89,7 @@ func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr
}

// Start starts the aggregator. It can be called after Stop to restart the
// aggretator.
// aggregator.
func (wbsa *Aggregator) Start() {
wbsa.mu.Lock()
defer wbsa.mu.Unlock()
10 changes: 5 additions & 5 deletions benchmark/benchmain/main.go
@@ -110,11 +110,11 @@ var (
useBufconn = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O")
enableKeepalive = flag.Bool("enable_keepalive", false, "Enable client keepalive. \n"+
"Keepalive.Time is set to 10s, Keepalive.Timeout is set to 1s, Keepalive.PermitWithoutStream is set to true.")
clientReadBufferSize = flags.IntSlice("clientReadBufferSize", []int{-1}, "Configures the client read buffer size in bytes. If negative, use the default - may be a a comma-separated list")
clientWriteBufferSize = flags.IntSlice("clientWriteBufferSize", []int{-1}, "Configures the client write buffer size in bytes. If negative, use the default - may be a a comma-separated list")
serverReadBufferSize = flags.IntSlice("serverReadBufferSize", []int{-1}, "Configures the server read buffer size in bytes. If negative, use the default - may be a a comma-separated list")
serverWriteBufferSize = flags.IntSlice("serverWriteBufferSize", []int{-1}, "Configures the server write buffer size in bytes. If negative, use the default - may be a a comma-separated list")
sleepBetweenRPCs = flags.DurationSlice("sleepBetweenRPCs", []time.Duration{0}, "Configures the maximum amount of time the client should sleep between consecutive RPCs - may be a a comma-separated list")
clientReadBufferSize = flags.IntSlice("clientReadBufferSize", []int{-1}, "Configures the client read buffer size in bytes. If negative, use the default - may be a comma-separated list")
clientWriteBufferSize = flags.IntSlice("clientWriteBufferSize", []int{-1}, "Configures the client write buffer size in bytes. If negative, use the default - may be a comma-separated list")
serverReadBufferSize = flags.IntSlice("serverReadBufferSize", []int{-1}, "Configures the server read buffer size in bytes. If negative, use the default - may be a comma-separated list")
serverWriteBufferSize = flags.IntSlice("serverWriteBufferSize", []int{-1}, "Configures the server write buffer size in bytes. If negative, use the default - may be a comma-separated list")
sleepBetweenRPCs = flags.DurationSlice("sleepBetweenRPCs", []time.Duration{0}, "Configures the maximum amount of time the client should sleep between consecutive RPCs - may be a comma-separated list")
connections = flag.Int("connections", 1, "The number of connections. Each connection will handle maxConcurrentCalls RPC streams")
recvBufferPool = flags.StringWithAllowedValues("recvBufferPool", recvBufferPoolNil, "Configures the shared receive buffer pool. One of: nil, simple, all", allRecvBufferPools)
sharedWriteBuffer = flags.StringWithAllowedValues("sharedWriteBuffer", toggleModeOff,
2 changes: 1 addition & 1 deletion benchmark/latency/latency.go
@@ -65,7 +65,7 @@ type Network struct {
var (
//Local simulates local network.
Local = Network{0, 0, 0}
//LAN simulates local area network network.
//LAN simulates local area network.
LAN = Network{100 * 1024, 2 * time.Millisecond, 1500}
//WAN simulates wide area network.
WAN = Network{20 * 1024, 30 * time.Millisecond, 1500}
6 changes: 3 additions & 3 deletions benchmark/latency/latency_test.go
@@ -46,9 +46,9 @@ type bufConn struct {
func (bufConn) Close() error { panic("unimplemented") }
func (bufConn) LocalAddr() net.Addr { panic("unimplemented") }
func (bufConn) RemoteAddr() net.Addr { panic("unimplemented") }
func (bufConn) SetDeadline(t time.Time) error { panic("unimplemneted") }
func (bufConn) SetReadDeadline(t time.Time) error { panic("unimplemneted") }
func (bufConn) SetWriteDeadline(t time.Time) error { panic("unimplemneted") }
func (bufConn) SetDeadline(t time.Time) error { panic("unimplemented") }
func (bufConn) SetReadDeadline(t time.Time) error { panic("unimplemented") }
func (bufConn) SetWriteDeadline(t time.Time) error { panic("unimplemented") }

func restoreHooks() func() {
s := sleep
2 changes: 1 addition & 1 deletion benchmark/primitives/syncmap_test.go
@@ -152,7 +152,7 @@ func benchmarkIncrementUint64Map(b *testing.B, f func() incrementUint64Map) {
}
}

func BenchmarkMapWithSyncMutexContetion(b *testing.B) {
func BenchmarkMapWithSyncMutexContention(b *testing.B) {
benchmarkIncrementUint64Map(b, newMapWithLock)
}

2 changes: 1 addition & 1 deletion benchmark/stats/stats.go
@@ -293,7 +293,7 @@ type RunData struct {
Fiftieth time.Duration
// Ninetieth is the 90th percentile latency.
Ninetieth time.Duration
// Ninetyninth is the 99th percentile latency.
// NinetyNinth is the 99th percentile latency.
NinetyNinth time.Duration
// Average is the average latency.
Average time.Duration
4 changes: 2 additions & 2 deletions benchmark/worker/main.go
@@ -141,7 +141,7 @@ func (s *workerServer) RunClient(stream testgrpc.WorkerService_RunClientServer)
var bc *benchmarkClient
defer func() {
// Shut down benchmark client when stream ends.
logger.Infof("shuting down benchmark client")
logger.Infof("shutting down benchmark client")
if bc != nil {
bc.shutdown()
}
@@ -160,7 +160,7 @@ func (s *workerServer) RunClient(stream testgrpc.WorkerService_RunClientServer)
case *testpb.ClientArgs_Setup:
logger.Infof("client setup received:")
if bc != nil {
logger.Infof("client setup received when client already exists, shuting down the existing client")
logger.Infof("client setup received when client already exists, shutting down the existing client")
bc.shutdown()
}
bc, err = startBenchmarkClient(t.Setup)
2 changes: 1 addition & 1 deletion binarylog/binarylog_end2end_test.go
@@ -78,7 +78,7 @@ func (s *testBinLogSink) Write(e *binlogpb.GrpcLogEntry) error {

func (s *testBinLogSink) Close() error { return nil }

// Returns all client entris if client is true, otherwise return all server
// Returns all client entries if client is true, otherwise return all server
// entries.
func (s *testBinLogSink) logEntries(client bool) []*binlogpb.GrpcLogEntry {
logger := binlogpb.GrpcLogEntry_LOGGER_SERVER
2 changes: 1 addition & 1 deletion clientconn.go
@@ -1576,7 +1576,7 @@ func (ac *addrConn) tearDown(err error) {
} else {
// Hard close the transport when the channel is entering idle or is
// being shutdown. In the case where the channel is being shutdown,
// closing of transports is also taken care of by cancelation of cc.ctx.
// closing of transports is also taken care of by cancellation of cc.ctx.
// But in the case where the channel is entering idle, we need to
// explicitly close the transports here. Instead of distinguishing
// between these two cases, it is simpler to close the transport
2 changes: 1 addition & 1 deletion clientconn_test.go
@@ -642,7 +642,7 @@ func (s) TestConnectParamsWithMinConnectTimeout(t *testing.T) {
defer conn.Close()

if got := conn.dopts.minConnectTimeout(); got != mct {
t.Errorf("unexpect minConnectTimeout on the connection: %v, want %v", got, mct)
t.Errorf("unexpected minConnectTimeout on the connection: %v, want %v", got, mct)
}
}

2 changes: 1 addition & 1 deletion credentials/alts/internal/conn/aeadrekey.go
@@ -49,7 +49,7 @@ func (k KeySizeError) Error() string {

// newRekeyAEAD creates a new instance of aes128gcm with rekeying.
// The key argument should be 44 bytes, the first 32 bytes are used as a key
// for HKDF-expand and the remainining 12 bytes are used as a random mask for
// for HKDF-expand and the remaining 12 bytes are used as a random mask for
// the counter.
func newRekeyAEAD(key []byte) (*rekeyAEAD, error) {
k := len(key)
2 changes: 1 addition & 1 deletion credentials/alts/internal/conn/aes128gcmrekey.go
@@ -51,7 +51,7 @@ type aes128gcmRekey struct {

// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying
// for ALTS record. The key argument should be 44 bytes, the first 32 bytes
// are used as a key for HKDF-expand and the remainining 12 bytes are used
// are used as a key for HKDF-expand and the remaining 12 bytes are used
// as a random mask for the counter.
func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) {
inCounter := NewInCounter(side, overflowLenAES128GCMRekey)
2 changes: 1 addition & 1 deletion credentials/alts/internal/conn/record_test.go
@@ -248,7 +248,7 @@ func testWriteLargeData(t *testing.T, rp string) {
// buffer size.
clientConn, serverConn := newConnPair(rp, nil, nil)
// Message size is intentionally chosen to not be multiple of
// payloadLengthLimtit.
// payloadLengthLimit.
msgSize := altsWriteBufferMaxSize + (100 * 1024)
clientMsg := make([]byte, msgSize)
for i := 0; i < msgSize; i++ {
2 changes: 1 addition & 1 deletion credentials/local/local.go
@@ -108,7 +108,7 @@ func (c *localTC) Clone() credentials.TransportCredentials {
}

// OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
// Since this feature is specific to TLS (SNI + hostname verification check), it does not take any effet for local credentials.
// Since this feature is specific to TLS (SNI + hostname verification check), it does not take any effect for local credentials.
func (c *localTC) OverrideServerName(serverNameOverride string) error {
c.info.ServerName = serverNameOverride
return nil
2 changes: 1 addition & 1 deletion credentials/sts/sts.go
@@ -367,7 +367,7 @@ type requestParameters struct {
ActorTokenType string `json:"actor_token_type,omitempty"`
}

// nesponseParameters stores all attributes sent as JSON in a successful STS
// responseParameters stores all attributes sent as JSON in a successful STS
// response. These attributes are defined in
// https://tools.ietf.org/html/rfc8693#section-2.2.1.
type responseParameters struct {
2 changes: 1 addition & 1 deletion examples/examples_test.sh
@@ -188,7 +188,7 @@ for example in ${EXAMPLES[@]}; do
$(cat $CLIENT_LOG)
"
else
pass "client successfully communitcated with server"
pass "client successfully communicated with server"
fi

# Check server log for expected output if expecting an
4 changes: 2 additions & 2 deletions examples/features/load_balancing/README.md
@@ -61,8 +61,8 @@ this is examples/load_balancing (from :50051)

The second client is configured to use `round_robin`. `round_robin` connects to
all the addresses it sees, and sends an RPC to each backend one at a time in
order. E.g. the first RPC will be sent to backend-1, the second RPC will be be
sent to backend-2, and the third RPC will be be sent to backend-1 again.
order. E.g. the first RPC will be sent to backend-1, the second RPC will be
sent to backend-2, and the third RPC will be sent to backend-1 again.

```
this is examples/load_balancing (from :50051)
2 changes: 1 addition & 1 deletion examples/features/multiplex/client/main.go
@@ -72,7 +72,7 @@ func main() {

fmt.Println()
fmt.Println("--- calling routeguide.RouteGuide/GetFeature ---")
// Make a routeguild client with the same ClientConn.
// Make a routeguide client with the same ClientConn.
rgc := ecpb.NewEchoClient(conn)
callUnaryEcho(rgc, "this is examples/multiplex")
}
2 changes: 1 addition & 1 deletion examples/features/orca/client/main.go
@@ -59,7 +59,7 @@ func main() {
ticker := time.NewTicker(time.Second)
for range ticker.C {
func() {
// Use an anonymous function to ensure context cancelation via defer.
// Use an anonymous function to ensure context cancellation via defer.
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if _, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: "test echo message"}); err != nil {
2 changes: 1 addition & 1 deletion gcp/observability/logging.go
@@ -248,7 +248,7 @@ type binaryMethodLogger struct {
clientSide bool
}

// buildGCPLoggingEntry converts the binary log log entry into a gcp logging
// buildGCPLoggingEntry converts the binary log entry into a gcp logging
// entry.
func (bml *binaryMethodLogger) buildGCPLoggingEntry(ctx context.Context, c iblog.LogEntryConfig) gcplogging.Entry {
binLogEntry := bml.mlb.Build(c)
2 changes: 1 addition & 1 deletion gcp/observability/logging_test.go
@@ -548,7 +548,7 @@ func (s) TestServerRPCEventsLogAll(t *testing.T) {
// Client and Server RPC Events configured to log. Both sides should log and
// share the exporter, so the exporter should receive the collective amount of
// calls for both a client stream (corresponding to a Client RPC Event) and a
// server stream (corresponding ot a Server RPC Event). The specificity of the
// server stream (corresponding to a Server RPC Event). The specificity of the
// entries are tested in previous tests.
func (s) TestBothClientAndServerRPCEvents(t *testing.T) {
fle := &fakeLoggingExporter{
2 changes: 1 addition & 1 deletion gcp/observability/observability_test.go
@@ -191,7 +191,7 @@ func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) {
}
}

// TestRefuseStartWithExcludeAndWildCardAll tests the sceanrio where an
// TestRefuseStartWithExcludeAndWildCardAll tests the scenario where an
// observability configuration is provided with client RPC event specifying to
// exclude, and which matches on the '*' wildcard (any). This should cause an
// error when trying to start the observability system.
2 changes: 1 addition & 1 deletion grpclog/grpclog.go
@@ -103,7 +103,7 @@ func Fatalf(format string, args ...any) {
}

// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
// It calle os.Exit()) with exit code 1.
// It calls os.Exit() with exit code 1.
func Fatalln(args ...any) {
grpclog.Logger.Fatalln(args...)
// Make sure fatal logs will exit.
4 changes: 2 additions & 2 deletions internal/balancer/gracefulswitch/gracefulswitch_test.go
@@ -604,7 +604,7 @@ func (s) TestPendingReplacedByAnotherPending(t *testing.T) {
if err != nil {
t.Fatalf("error constructing newSubConn in gsb: %v", err)
}
// This picker never returns an error, which can help this this test verify
// This picker never returns an error, which can help this test verify
// whether this cached state will get cleared on a new pending balancer
// (will replace it with a picker that always errors).
pendBal.updateState(balancer.State{
@@ -672,7 +672,7 @@ func (p *neverErrPicker) Pick(info balancer.PickInfo) (balancer.PickResult, erro

// TestUpdateSubConnStateRace tests the race condition when the graceful switch
// load balancer receives a SubConnUpdate concurrently with an UpdateState()
// call, which can cause the balancer to forward the update to to be closed and
// call, which can cause the balancer to forward the update to be closed and
// cleared. The balancer API guarantees to never call any method the balancer
// after a Close() call, and the test verifies that doesn't happen within the
// graceful switch load balancer.