diff --git a/CHANGELOG.md b/CHANGELOG.md index 739732ae27..3af0f7626f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -400,7 +400,7 @@ **Merged pull requests:** - Cherrypick \#1105 into Release-1.9 [\#1121](https://github.com/kubernetes/ingress-gce/pull/1121) ([freehan](https://github.com/freehan)) -- Cherrpick \#1119 \[Force send Enable field for LogConfig\] into release-1.9 [\#1120](https://github.com/kubernetes/ingress-gce/pull/1120) ([skmatti](https://github.com/skmatti)) +- Cherrypick \#1119 \[Force send Enable field for LogConfig\] into release-1.9 [\#1120](https://github.com/kubernetes/ingress-gce/pull/1120) ([skmatti](https://github.com/skmatti)) - Update generated code for BackendConfig in release-1.9 [\#1115](https://github.com/kubernetes/ingress-gce/pull/1115) ([skmatti](https://github.com/skmatti)) - Cherry Pick \#1107 \[Wait for caches to sync before running node sync\] to release-1.9 [\#1114](https://github.com/kubernetes/ingress-gce/pull/1114) ([spencerhance](https://github.com/spencerhance)) - Cherry Pick \#1104 \[Change project permissions check from 'foo' to 'k8s-ingress-svc-acct-...\] to release-1.9… [\#1113](https://github.com/kubernetes/ingress-gce/pull/1113) ([spencerhance](https://github.com/spencerhance)) @@ -649,7 +649,7 @@ - Fix CreateILBSubnet\(\) logic in e2e tests [\#885](https://github.com/kubernetes/ingress-gce/pull/885) ([spencerhance](https://github.com/spencerhance)) - Fix Ingress names for ILB e2e update test [\#884](https://github.com/kubernetes/ingress-gce/pull/884) ([spencerhance](https://github.com/spencerhance)) - Refactor ingress key function and finalizer into separate package [\#883](https://github.com/kubernetes/ingress-gce/pull/883) ([skmatti](https://github.com/skmatti)) -- deploy csm neg scirpt and yaml [\#882](https://github.com/kubernetes/ingress-gce/pull/882) ([cadmuxe](https://github.com/cadmuxe)) +- deploy csm neg script and yaml [\#882](https://github.com/kubernetes/ingress-gce/pull/882) ([cadmuxe](https://github.com/cadmuxe)) - BugFix: Update ingress key function used for GC [\#881](https://github.com/kubernetes/ingress-gce/pull/881) ([skmatti](https://github.com/skmatti)) - Fix basic ilb test service name [\#880](https://github.com/kubernetes/ingress-gce/pull/880) ([spencerhance](https://github.com/spencerhance)) - Fix backend services whitebox test to take into account the default backend [\#878](https://github.com/kubernetes/ingress-gce/pull/878) ([rramkumar1](https://github.com/rramkumar1)) @@ -709,7 +709,7 @@ **Fixed bugs:** - NEG controller should create NEG for default backend when enabled [\#767](https://github.com/kubernetes/ingress-gce/issues/767) -- Removing Node Pool from Cluster Breaks Ingress Conroller [\#649](https://github.com/kubernetes/ingress-gce/issues/649) +- Removing Node Pool from Cluster Breaks Ingress Controller [\#649](https://github.com/kubernetes/ingress-gce/issues/649) - Ingress controller should react to node scale down event from autoscaler [\#595](https://github.com/kubernetes/ingress-gce/issues/595) - BackendConfig OpenAPI spec [\#563](https://github.com/kubernetes/ingress-gce/issues/563) @@ -811,7 +811,7 @@ **Closed issues:** -- HTTS frontend listener isn't deleted after setting ingress.allow-http: "false" annotation [\#766](https://github.com/kubernetes/ingress-gce/issues/766) +- HTTPS frontend listener isn't deleted after setting ingress.allow-http: "false" annotation [\#766](https://github.com/kubernetes/ingress-gce/issues/766) - Backends healthchecks and expected operation 
[\#762](https://github.com/kubernetes/ingress-gce/issues/762) - Update GKE self managed script [\#758](https://github.com/kubernetes/ingress-gce/issues/758) - Deploying to GKE self managed has invalid YAML [\#755](https://github.com/kubernetes/ingress-gce/issues/755) @@ -832,7 +832,7 @@ - Rebase of \#559 "Scaffolding for FrontendConfig" [\#753](https://github.com/kubernetes/ingress-gce/pull/753) ([spencerhance](https://github.com/spencerhance)) - Emit event if Ingress spec does not contain valid config to setup frontend resources [\#752](https://github.com/kubernetes/ingress-gce/pull/752) ([rramkumar1](https://github.com/rramkumar1)) - readiness reflector [\#748](https://github.com/kubernetes/ingress-gce/pull/748) ([freehan](https://github.com/freehan)) -- Adding the /healthz handler to the 404-default-server-with-metris to … [\#747](https://github.com/kubernetes/ingress-gce/pull/747) ([vbannai](https://github.com/vbannai)) +- Adding the /healthz handler to the 404-default-server-with-metrics to … [\#747](https://github.com/kubernetes/ingress-gce/pull/747) ([vbannai](https://github.com/vbannai)) - Update canonical rbac.yaml with latest, minimal bindings [\#746](https://github.com/kubernetes/ingress-gce/pull/746) ([dekkagaijin](https://github.com/dekkagaijin)) - Adding docker configuration file for the 404-server-with-metrics and … [\#745](https://github.com/kubernetes/ingress-gce/pull/745) ([vbannai](https://github.com/vbannai)) - More composite types [\#742](https://github.com/kubernetes/ingress-gce/pull/742) ([spencerhance](https://github.com/spencerhance)) @@ -844,7 +844,7 @@ **Implemented enhancements:** - Link for the example on deploying ingress controller is not valid [\#686](https://github.com/kubernetes/ingress-gce/issues/686) -- If readiness probe is on port different than the service \(app\) port - ingress failes to sync the correct healthcheck [\#647](https://github.com/kubernetes/ingress-gce/issues/647) +- If readiness probe is on port different than the service \(app\) port - ingress fails to sync the correct healthcheck [\#647](https://github.com/kubernetes/ingress-gce/issues/647) **Fixed bugs:** @@ -931,11 +931,11 @@ - GKE BackendConfig permissions change `container.backendConfigs.get` does not exist [\#538](https://github.com/kubernetes/ingress-gce/issues/538) - A new home for 404-server \(defaultbackend\) [\#498](https://github.com/kubernetes/ingress-gce/issues/498) - Does not work if workers are in different subnet. 
[\#282](https://github.com/kubernetes/ingress-gce/issues/282) -- original http request origin and host headers are overriden [\#179](https://github.com/kubernetes/ingress-gce/issues/179) +- original http request origin and host headers are overridden [\#179](https://github.com/kubernetes/ingress-gce/issues/179) **Merged pull requests:** -- Modify NameBelongToCluter to tolerate truncated cluster name suffix [\#650](https://github.com/kubernetes/ingress-gce/pull/650) ([freehan](https://github.com/freehan)) +- Modify NameBelongToCluster to tolerate truncated cluster name suffix [\#650](https://github.com/kubernetes/ingress-gce/pull/650) ([freehan](https://github.com/freehan)) - Shorten the name of the namespace for test sandboxes [\#648](https://github.com/kubernetes/ingress-gce/pull/648) ([rramkumar1](https://github.com/rramkumar1)) - Move lone function in kubeapi.go into existing utils.go [\#644](https://github.com/kubernetes/ingress-gce/pull/644) ([rramkumar1](https://github.com/rramkumar1)) - Update CHANGELOG and version mapping for v1.4.3 [\#643](https://github.com/kubernetes/ingress-gce/pull/643) ([rramkumar1](https://github.com/rramkumar1)) @@ -988,7 +988,7 @@ - Add pkg/common/operator & pkg/common/typed to make resource joins much cleaner. [\#517](https://github.com/kubernetes/ingress-gce/pull/517) ([rramkumar1](https://github.com/rramkumar1)) - Add Syncer Skeleton [\#509](https://github.com/kubernetes/ingress-gce/pull/509) ([freehan](https://github.com/freehan)) - Welcome defaultbackend to the ingress-gce repo [\#503](https://github.com/kubernetes/ingress-gce/pull/503) ([jonpulsifer](https://github.com/jonpulsifer)) -- Add a Backofff Handler utils [\#499](https://github.com/kubernetes/ingress-gce/pull/499) ([freehan](https://github.com/freehan)) +- Add a Backoff Handler utils [\#499](https://github.com/kubernetes/ingress-gce/pull/499) ([freehan](https://github.com/freehan)) # Change Log @@ -999,7 +999,7 @@ - GCP - Kubernetes Ingress Backend service unhealthy [\#621](https://github.com/kubernetes/ingress-gce/issues/621) - BackendConfig security policy not enforced [\#616](https://github.com/kubernetes/ingress-gce/issues/616) -- original http request origin and host headers are overriden [\#179](https://github.com/kubernetes/ingress-gce/issues/179) +- original http request origin and host headers are overridden [\#179](https://github.com/kubernetes/ingress-gce/issues/179) **Merged pull requests:** @@ -1133,7 +1133,7 @@ **Closed issues:** -- GCE ingress stucks on "Creating ingress" status, existing ingresses don't update [\#470](https://github.com/kubernetes/ingress-gce/issues/470) +- GCE ingress stuck on "Creating ingress" status, existing ingresses don't update [\#470](https://github.com/kubernetes/ingress-gce/issues/470) - Issue with multiple domains and SSL certificates when using ingress-gce [\#466](https://github.com/kubernetes/ingress-gce/issues/466) **Merged pull requests:** @@ -1251,7 +1251,7 @@ - Documentation fixes [\#394](https://github.com/kubernetes/ingress-gce/pull/394) ([rramkumar1](https://github.com/rramkumar1)) - Implement security policy validator for real [\#393](https://github.com/kubernetes/ingress-gce/pull/393) ([MrHohn](https://github.com/MrHohn)) - promote http2 to beta [\#382](https://github.com/kubernetes/ingress-gce/pull/382) ([agau4779](https://github.com/agau4779)) -- Typo in message: SyncNetworkEndpiontGroupFailed-\>SyncNetworkEndpointGroupFailed [\#374](https://github.com/kubernetes/ingress-gce/pull/374) ([AdamDang](https://github.com/AdamDang)) +- Typo 
in message: SyncNetworkEndpointGroupFailed-\>SyncNetworkEndpointGroupFailed [\#374](https://github.com/kubernetes/ingress-gce/pull/374) ([AdamDang](https://github.com/AdamDang)) - URLMap sync [\#356](https://github.com/kubernetes/ingress-gce/pull/356) ([nicksardo](https://github.com/nicksardo)) ## [v1.2.2](https://github.com/kubernetes/ingress-gce/tree/v1.2.2) (2018-07-09) @@ -1307,7 +1307,7 @@ - Fix WaitForGCLBDeletion\(\) callers [\#371](https://github.com/kubernetes/ingress-gce/pull/371) ([MrHohn](https://github.com/MrHohn)) - Update deploy script to edit copy of default backend service yaml [\#368](https://github.com/kubernetes/ingress-gce/pull/368) ([rramkumar1](https://github.com/rramkumar1)) - Add simple e2e test for CDN & IAP [\#367](https://github.com/kubernetes/ingress-gce/pull/367) ([rramkumar1](https://github.com/rramkumar1)) -- Swtich to use beta HealthCheck for NEG [\#366](https://github.com/kubernetes/ingress-gce/pull/366) ([freehan](https://github.com/freehan)) +- Switch to use beta HealthCheck for NEG [\#366](https://github.com/kubernetes/ingress-gce/pull/366) ([freehan](https://github.com/freehan)) - Fix order-dependency in test cases [\#364](https://github.com/kubernetes/ingress-gce/pull/364) ([anfernee](https://github.com/anfernee)) - Revendor GCE go client, cloud provider and fixes to make it work [\#362](https://github.com/kubernetes/ingress-gce/pull/362) ([freehan](https://github.com/freehan)) - Fix missing gcloud command in e2e script [\#361](https://github.com/kubernetes/ingress-gce/pull/361) ([bowei](https://github.com/bowei)) @@ -1322,7 +1322,7 @@ - merge Ingress NEG annotation and Expose NEG annotation [\#350](https://github.com/kubernetes/ingress-gce/pull/350) ([agau4779](https://github.com/agau4779)) - Add Liveness Probe for NEG controller [\#349](https://github.com/kubernetes/ingress-gce/pull/349) ([freehan](https://github.com/freehan)) - Make sure we get a BackendService after updating it to populate object fingerprint \[WIP\] [\#348](https://github.com/kubernetes/ingress-gce/pull/348) ([rramkumar1](https://github.com/rramkumar1)) -- On removal of backend config name from service annotaion, ensure no existing settings are affected [\#347](https://github.com/kubernetes/ingress-gce/pull/347) ([rramkumar1](https://github.com/rramkumar1)) +- On removal of backend config name from service annotation, ensure no existing settings are affected [\#347](https://github.com/kubernetes/ingress-gce/pull/347) ([rramkumar1](https://github.com/rramkumar1)) - Adds readme for e2e-tests [\#346](https://github.com/kubernetes/ingress-gce/pull/346) ([bowei](https://github.com/bowei)) - Modify IAP + CDN support to not touch settings if section in spec is missing [\#345](https://github.com/kubernetes/ingress-gce/pull/345) ([rramkumar1](https://github.com/rramkumar1)) - Delete ingress and wait for resource deletion [\#344](https://github.com/kubernetes/ingress-gce/pull/344) ([bowei](https://github.com/bowei)) diff --git a/cmd/404-server-with-metrics/README.md b/cmd/404-server-with-metrics/README.md index 440dd32c94..1fbae6a724 100644 --- a/cmd/404-server-with-metrics/README.md +++ b/cmd/404-server-with-metrics/README.md @@ -95,7 +95,7 @@ ab -n 10000000 -c 10000 http://localhost:8080/page.html > /tmp/ab_get_test.log & * Results * default MAXGOPROCS = 12 - * around 1K requests/sec each (GET and POST)over 2m (combine througput of 2K requests/sec) + * around 1K requests/sec each (GET and POST)over 2m (combine throughput of 2K requests/sec) * max go routines peaked to 17K and oscillates 
between 5K to 15K * server over 20M GET + POST requests @@ -248,7 +248,7 @@ ab -n 10000000 -c 1000 http://localhost:8080/page.html > /tmp/ab_get_test.log & * Results * default MAXGOPROCS = 12 - * around 18K requests/sec each (GET and POST) over 20m requests (combine througput of 36K requests/sec) + * around 18K requests/sec each (GET and POST) over 20m requests (combine throughput of 36K requests/sec) * max go routines peaked to 1.5K and oscillates between 500 to 1.5K * server over 20M GET requests * http processing delay over 1m, low: 0.075ms, avg: 0.125ms and max: 0.2ms diff --git a/cmd/404-server-with-metrics/server-with-metrics.go b/cmd/404-server-with-metrics/server-with-metrics.go index 617e2b0a11..fa50c32106 100644 --- a/cmd/404-server-with-metrics/server-with-metrics.go +++ b/cmd/404-server-with-metrics/server-with-metrics.go @@ -61,7 +61,7 @@ func main() { server.registerHandlers() klog.Infof("Default 404 server is running with GOMAXPROCS(%d) on %s:%d\n", runtime.GOMAXPROCS(-1), hostName, *port) - // The main http server for handling NotFound and healthzrequests + // The main http server for handling NotFound and healthz requests go func() { err := server.httpServer.ListenAndServe() if err != nil { @@ -237,10 +237,10 @@ func (s *server) notFoundHandler() http.HandlerFunc { path := r.URL.Path w.WriteHeader(http.StatusNotFound) // we log 1 out of 10 requests (by default) to the logs - fmt.Fprintf(w, "response 404 (backend NotFound), service rules for the path non-existent \n") + fmt.Fprintf(w, "response 404 (backend NotFound), service rules for the path nonexistent \n") s.idleChannel <- true if rand.Float64() < *logSampleRequests { - klog.Infof("response 404 (backend NotFound), service rules for [ %s ] non-existent \n", path) + klog.Infof("response 404 (backend NotFound), service rules for [ %s ] nonexistent \n", path) } } } diff --git a/cmd/e2e-test/asm_test.go b/cmd/e2e-test/asm_test.go index 439580049b..1e1d671d4d 100644 --- a/cmd/e2e-test/asm_test.go +++ b/cmd/e2e-test/asm_test.go @@ -41,9 +41,9 @@ func TestASMConfig(t *testing.T) { }, { desc: "Invalid ConfigMap filed equals to disable", - configMap: map[string]string{"enable-unknow-feild": "INVALID1"}, + configMap: map[string]string{"enable-unknow-field": "INVALID1"}, wantASMReady: false, - wantConfigMapEvents: []string{"The map contains a unknown key-value pair: enable-unknow-feild:INVALID1"}, + wantConfigMapEvents: []string{"The map contains a unknown key-value pair: enable-unknow-field:INVALID1"}, }, { desc: "Set enable-asm to true should restart the controller", @@ -124,14 +124,14 @@ func TestASMServiceAndDestinationRule(t *testing.T) { // Different versions will be used as DestinationRule: subset for _, deployment := range []struct { deploymentName string - replics int32 + replicas int32 version string }{ - {deploymentName: "deployment-v1", replics: 1, version: "v1"}, - {deploymentName: "deployment-v2", replics: 2, version: "v2"}, - {deploymentName: "deployment-v3", replics: 3, version: "v3"}, + {deploymentName: "deployment-v1", replicas: 1, version: "v1"}, + {deploymentName: "deployment-v2", replicas: 2, version: "v2"}, + {deploymentName: "deployment-v3", replicas: 3, version: "v3"}, } { - if err := e2e.CreatePorterDeployment(s, deployment.deploymentName, deployment.replics, deployment.version); err != nil { + if err := e2e.CreatePorterDeployment(s, deployment.deploymentName, deployment.replicas, deployment.version); err != nil { t.Errorf("Failed to create deployment, Error: %s", err) } } @@ -163,7 +163,7 @@ func 
TestASMServiceAndDestinationRule(t *testing.T) { } if svc.inSkipNamespace { if negStatus != nil { - t.Errorf("Service: %s/%s is in the ASM skip namespace, shoudln't have NEG Status. ASM Config: %v, NEGStatus got: %v", + t.Errorf("Service: %s/%s is in the ASM skip namespace, shouldn't have NEG Status. ASM Config: %v, NEGStatus got: %v", sandbox.Namespace, svc.svcName, asmConfig, negStatus) } } else { @@ -190,7 +190,7 @@ func TestASMServiceAndDestinationRule(t *testing.T) { t.Run(tc.desc, func(t *testing.T) { sandbox := s drHost := svcName - // crossNamespace will test DestinationRules that refering a serive located in a different namespace + // crossNamespace will test DestinationRules that referring a service located in a different namespace if tc.crossNamespace { sandbox = sSkip drHost = fmt.Sprintf("%s.%s.svc.cluster.local", svcName, s.Namespace) diff --git a/cmd/e2e-test/backend_config_test.go b/cmd/e2e-test/backend_config_test.go index a0ca19282e..40dbe219bc 100644 --- a/cmd/e2e-test/backend_config_test.go +++ b/cmd/e2e-test/backend_config_test.go @@ -185,7 +185,7 @@ func TestBackendConfigAPI(t *testing.T) { } v1beta1BackendConfig := &backendconfigv1beta1.BackendConfig{} if err := json.Unmarshal(bcData, v1beta1BackendConfig); err != nil { - t.Fatalf("Failed to unmarshall backendconfig %s into v1beta1: %v", bcKey, err) + t.Fatalf("Failed to unmarshal backendconfig %s into v1beta1: %v", bcKey, err) } // Create BackendConfig using v1 API and retrieve it using v1beta1 API. diff --git a/cmd/e2e-test/basic_test.go b/cmd/e2e-test/basic_test.go index 3ac8368f08..3949c6abd8 100644 --- a/cmd/e2e-test/basic_test.go +++ b/cmd/e2e-test/basic_test.go @@ -89,7 +89,7 @@ func testBasicOS(t *testing.T, os e2e.OS) { if err != nil { t.Fatalf("error waiting for Ingress to stabilize: %v", err) } - t.Logf("GCLB resources createdd (%s/%s)", s.Namespace, tc.ing.Name) + t.Logf("GCLB resources created (%s/%s)", s.Namespace, tc.ing.Name) // Perform whitebox testing. gclb, err := e2e.WhiteboxTest(ing, nil, Framework.Cloud, "", s) @@ -200,7 +200,7 @@ func TestEdge(t *testing.T) { if err != nil { t.Fatalf("error waiting for Ingress to stabilize: %v", err) } - t.Logf("GCLB resources createdd (%s/%s)", s.Namespace, tc.ing.Name) + t.Logf("GCLB resources created (%s/%s)", s.Namespace, tc.ing.Name) // Perform whitebox testing. 
gclb, err := e2e.WhiteboxTest(ing, nil, Framework.Cloud, "", s) diff --git a/cmd/e2e-test/draining_test.go b/cmd/e2e-test/draining_test.go index bf623e2ea8..73e8a7b113 100644 --- a/cmd/e2e-test/draining_test.go +++ b/cmd/e2e-test/draining_test.go @@ -35,8 +35,8 @@ import ( ) const ( - drainingTransitionPollTimeout = 15 * time.Minute - drainingTansitionPollInterval = 30 * time.Second + drainingTransitionPollTimeout = 15 * time.Minute + drainingTransitionPollInterval = 30 * time.Second ) func TestDraining(t *testing.T) { @@ -134,7 +134,7 @@ func TestDraining(t *testing.T) { t.Errorf("Failed to update BackendConfig ConnectionDraining settings for %s: %v", t.Name(), err) } - if err := wait.Poll(drainingTansitionPollInterval, drainingTransitionPollTimeout, func() (bool, error) { + if err := wait.Poll(drainingTransitionPollInterval, drainingTransitionPollTimeout, func() (bool, error) { params := &fuzz.GCLBForVIPParams{VIP: vip, Validators: fuzz.FeatureValidators(features.All)} gclb, err = fuzz.GCLBForVIP(context.Background(), Framework.Cloud, params) if err != nil { diff --git a/cmd/e2e-test/ilb_test.go b/cmd/e2e-test/ilb_test.go index fbea80eb19..9bb5cb6866 100644 --- a/cmd/e2e-test/ilb_test.go +++ b/cmd/e2e-test/ilb_test.go @@ -110,7 +110,7 @@ func TestILB(t *testing.T) { if err != nil { t.Fatalf("error waiting for Ingress to stabilize: %v", err) } - t.Logf("GCLB resources createdd (%s/%s)", s.Namespace, tc.ing.Name) + t.Logf("GCLB resources created (%s/%s)", s.Namespace, tc.ing.Name) // Perform whitebox testing. if len(ing.Status.LoadBalancer.Ingress) < 1 { @@ -335,7 +335,7 @@ func TestILBHttps(t *testing.T) { if err != nil { t.Fatalf("error waiting for Ingress to stabilize: %v", err) } - t.Logf("GCLB resources createdd (%s/%s)", s.Namespace, ing.Name) + t.Logf("GCLB resources created (%s/%s)", s.Namespace, ing.Name) // Perform whitebox testing. if len(ing.Status.LoadBalancer.Ingress) < 1 { @@ -476,7 +476,7 @@ func TestILBUpdate(t *testing.T) { if err != nil { t.Fatalf("error waiting for Ingress to stabilize: %v", err) } - t.Logf("GCLB resources createdd (%s/%s)", s.Namespace, tc.ing.Name) + t.Logf("GCLB resources created (%s/%s)", s.Namespace, tc.ing.Name) // Perform whitebox testing. if len(ing.Status.LoadBalancer.Ingress) < 1 { @@ -511,7 +511,7 @@ func TestILBUpdate(t *testing.T) { if err != nil { t.Fatalf("error waiting for Ingress to stabilize: %v", err) } - t.Logf("GCLB resources createdd (%s/%s)", s.Namespace, tc.ingUpdate.Name) + t.Logf("GCLB resources created (%s/%s)", s.Namespace, tc.ingUpdate.Name) // Perform whitebox testing. if len(ing.Status.LoadBalancer.Ingress) < 1 { @@ -703,7 +703,7 @@ func TestILBShared(t *testing.T) { if err != nil { t.Fatalf("error waiting for Ingress to stabilize: %v", err) } - t.Logf("GCLB resources createdd (%s/%s)", s.Namespace, ing.Name) + t.Logf("GCLB resources created (%s/%s)", s.Namespace, ing.Name) // Perform whitebox testing. if len(ing.Status.LoadBalancer.Ingress) < 1 { diff --git a/cmd/e2e-test/neg_test.go b/cmd/e2e-test/neg_test.go index 19f9285f06..ab6cc3394b 100644 --- a/cmd/e2e-test/neg_test.go +++ b/cmd/e2e-test/neg_test.go @@ -359,7 +359,7 @@ func TestNEGSyncEndpoints(t *testing.T) { // This test rescales test backend and validate if NEG controller is able to handle it correctly. // Following validation is performed: // 1. validate if expected number of network endpoint is in NEGs - // 2. validate if the newtork endpoint is healthy + // 2. validate if the network endpoint is healthy // 3. 
validate by sending traffic to LB VIP and check if expected number of backends can be reached. // First scale up the pods to 5 replicas to try to cover all zones where the cluster spans. scaleAndValidate(5) @@ -739,7 +739,7 @@ func TestNegDisruptive(t *testing.T) { // are temporary and can be ignored foundEvents, err := e2e.CheckSvcEvents(s, serviceName, v1.EventTypeWarning, "error processing service", "is shutting down", "not found") if err != nil { - t.Fatalf("errored quering for service events: %q", err) + t.Fatalf("errored querying for service events: %q", err) } if foundEvents { t.Fatalf("found error events when none were expected") diff --git a/cmd/e2e-test/upgrade/v2frontendnamer.go b/cmd/e2e-test/upgrade/v2frontendnamer.go index 2cc68fff6b..db92223585 100644 --- a/cmd/e2e-test/upgrade/v2frontendnamer.go +++ b/cmd/e2e-test/upgrade/v2frontendnamer.go @@ -38,7 +38,7 @@ type V2FrontendNamer struct { // NewV2FrontendNamerTest returns upgrade test for v2 frontend namer. // This test asserts that v1 finalizer is retained/attached and ingress continues -// to use v1 naming naming scheme when master is upgraded to a gke version that use v1.8. +// to use v1 naming scheme when master is upgraded to a gke version that use v1.8. func NewV2FrontendNamerTest() e2e.UpgradeTest { return &V2FrontendNamer{} } diff --git a/cmd/fuzzer/README.md b/cmd/fuzzer/README.md index 3cd8613eb5..0317990801 100644 --- a/cmd/fuzzer/README.md +++ b/cmd/fuzzer/README.md @@ -3,7 +3,7 @@ ## validate `fuzzer validate` will validate the Ingress spec against the load balancer that -was instatiated with the given spec. +was instantiated with the given spec. Usage: diff --git a/docs/contrib/cluster-setup.md b/docs/contrib/cluster-setup.md index 1c572f228a..933bea9147 100644 --- a/docs/contrib/cluster-setup.md +++ b/docs/contrib/cluster-setup.md @@ -3,7 +3,7 @@ This doc outlines the steps needed to run Ingress-GCE binary locally: * Setup of a dev GKE cluster. * Authorization configuration to it. -* Buldinging and running the Ingress-GCE bibnary locally. +* Building and running the Ingress-GCE binary locally. ## Create the cluster @@ -16,7 +16,7 @@ Once the cluster is ready you need to disable HTTP Load Balancing. $ gcloud container clusters update CLUSTER_NAME --update-addons=HttpLoadBalancing=DISABLED ``` You can also do this from the Cloud Console. -The HTTP Load Balancing option is avaialbe under Networking section. +The HTTP Load Balancing option is available under Networking section. ## Authorize gcloud and kubectl diff --git a/docs/contrib/dev-setup.md b/docs/contrib/dev-setup.md index 9c4ae89983..a5569981c4 100644 --- a/docs/contrib/dev-setup.md +++ b/docs/contrib/dev-setup.md @@ -5,7 +5,7 @@ The below guide assumes you have installed the necessary binaries to run Golang. ## Get the code -We suggest to follow the steps [here](https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md) to get your Github workflow started. +We suggest to follow the steps [here](https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md) to get your GitHub workflow started. Those steps reference the main Kubernetes repository but the same steps apply for this repository. ## Unit tests diff --git a/docs/deploy/gke/README.md b/docs/deploy/gke/README.md index bfcf15c87b..4837f565d9 100644 --- a/docs/deploy/gke/README.md +++ b/docs/deploy/gke/README.md @@ -76,7 +76,7 @@ Here is an explanation of each script dependency. deployment and service. 
This is no different than what you are used to seeing in your cluster. In this case, we need to recreate the default backend since turning off the GLBC on the master removes it. Note that we - modify the file to use the same node port as before before we create the resource. + modify the file to use the same node port as before we create the resource. 3. [rbac.yaml](../resources/rbac.yaml) * This file contains specification for an RBAC role which gives the GLBC access to the resources it needs from the k8s API server. diff --git a/docs/deploy/gke/csm/README.md b/docs/deploy/gke/csm/README.md index 26c11de149..97e8bcf9a3 100644 --- a/docs/deploy/gke/csm/README.md +++ b/docs/deploy/gke/csm/README.md @@ -7,11 +7,11 @@ This document will deploy the self managed Ingress-GCE controller in CSM(Cloud S The cluster should satisfy the following restrictions: * GKE version 1.14+ - * [IP Alias](https://cloud.google.com/kubernetes-engine/docs/how-to/alias-ips) eanbeld + * [IP Alias](https://cloud.google.com/kubernetes-engine/docs/how-to/alias-ips) enabled * Default Ingress-GCE Controller disabled. * [GKE Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) enabled. -## [Option 1] Ceate a new Cluster +## [Option 1] Create a new Cluster ```sh gcloud container clusters create $CLUSTER --enable-ip-alias --cluster-version 1.14 \ diff --git a/docs/deploy/gke/gke-self-managed.sh b/docs/deploy/gke/gke-self-managed.sh index 9846735a9b..b42d5fec72 100755 --- a/docs/deploy/gke/gke-self-managed.sh +++ b/docs/deploy/gke/gke-self-managed.sh @@ -211,7 +211,7 @@ case $key in --no-confirm) CONFIRM=0 - # --quiet flag makes gloud prompts non-interactive, ensuring this script can be + # --quiet flag makes gcloud prompts non-interactive, ensuring this script can be # used in automated flows (user can also use this to provide extra, arbitrary flags). GCLOUD_EXTRA_FLAGS="${GCLOUD_EXTRA_FLAGS} --quiet" shift @@ -304,7 +304,7 @@ if [[ -n $BUILD_AND_PUSH ]]; then IMAGE_URL=$(head -n 1 $(ls -t ../../../.*_ingress-gce-glbc-*-push | head -1)) if [[ $? -eq 1 ]]; then - error_exit "Error-bot: Issue geting the image url consider providing --image-url yourself" + error_exit "Error-bot: Issue getting the image url consider providing --image-url yourself" fi echo "Pushed new glbc image to: $IMAGE_URL" else diff --git a/docs/experimental/workload/README.md b/docs/experimental/workload/README.md index a14375914b..3a062cea5c 100644 --- a/docs/experimental/workload/README.md +++ b/docs/experimental/workload/README.md @@ -243,7 +243,7 @@ NAME ADDRESSTYPE PORTS ENDPOINTS workload-service-workload-controller.k8s.io IPv4 80 10.168.15.209 91s ``` -# Toubleshooting +# Troubleshooting ## Service Account Email diff --git a/docs/faq/gce.md b/docs/faq/gce.md index edeafb5eb6..5766054006 100644 --- a/docs/faq/gce.md +++ b/docs/faq/gce.md @@ -175,7 +175,7 @@ grained control over the algorithm, you should deploy the [nginx controller](htt ## Is there a maximum number of Endpoints I can add to the Ingress? This limit is directly related to the maximum number of endpoints allowed in a -Kubernetes cluster, not the the HTTP LB configuration, since the HTTP LB sends +Kubernetes cluster, not the HTTP LB configuration, since the HTTP LB sends packets to VMs. Ingress is not yet supported on single zone clusters of size > 1000 nodes ([issue](https://github.com/kubernetes/contrib/issues/1724)). 
If you'd like to use Ingress on a large cluster, spread it across 2 or more zones diff --git a/pkg/annotations/ingress.go b/pkg/annotations/ingress.go index 64ccb1db00..be6998a866 100644 --- a/pkg/annotations/ingress.go +++ b/pkg/annotations/ingress.go @@ -70,7 +70,7 @@ const ( // InstanceGroupsAnnotationKey is the annotation key used by controller to // specify the name and zone of instance groups created for the ingress. - // This is read only for users. Controller will overrite any user updates. + // This is read only for users. Controller will overwrite any user updates. // This is only set for ingresses with ingressClass = "gce-multi-cluster" InstanceGroupsAnnotationKey = "ingress.gcp.kubernetes.io/instance-groups" diff --git a/pkg/apis/backendconfig/v1/types.go b/pkg/apis/backendconfig/v1/types.go index 74a6ab7168..ae7f339567 100644 --- a/pkg/apis/backendconfig/v1/types.go +++ b/pkg/apis/backendconfig/v1/types.go @@ -185,7 +185,7 @@ type ConnectionDrainingConfig struct { DrainingTimeoutSec int64 `json:"drainingTimeoutSec,omitempty"` } -// SessionAffinityConfig contains configuration for stickyness parameters. +// SessionAffinityConfig contains configuration for stickiness parameters. // +k8s:openapi-gen=true type SessionAffinityConfig struct { AffinityType string `json:"affinityType,omitempty"` diff --git a/pkg/apis/backendconfig/v1/zz_generated.openapi.go b/pkg/apis/backendconfig/v1/zz_generated.openapi.go index 693a2030ff..665172c150 100644 --- a/pkg/apis/backendconfig/v1/zz_generated.openapi.go +++ b/pkg/apis/backendconfig/v1/zz_generated.openapi.go @@ -638,7 +638,7 @@ func schema_pkg_apis_backendconfig_v1_SessionAffinityConfig(ref common.Reference return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "SessionAffinityConfig contains configuration for stickyness parameters.", + Description: "SessionAffinityConfig contains configuration for stickiness parameters.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "affinityType": { diff --git a/pkg/apis/backendconfig/v1beta1/types.go b/pkg/apis/backendconfig/v1beta1/types.go index 74dd9b3989..0782e1aac2 100644 --- a/pkg/apis/backendconfig/v1beta1/types.go +++ b/pkg/apis/backendconfig/v1beta1/types.go @@ -126,7 +126,7 @@ type ConnectionDrainingConfig struct { DrainingTimeoutSec int64 `json:"drainingTimeoutSec,omitempty"` } -// SessionAffinityConfig contains configuration for stickyness parameters. +// SessionAffinityConfig contains configuration for stickiness parameters. 
// +k8s:openapi-gen=true type SessionAffinityConfig struct { AffinityType string `json:"affinityType,omitempty"` diff --git a/pkg/apis/backendconfig/v1beta1/zz_generated.openapi.go b/pkg/apis/backendconfig/v1beta1/zz_generated.openapi.go index 845e215d7e..5df7568e49 100644 --- a/pkg/apis/backendconfig/v1beta1/zz_generated.openapi.go +++ b/pkg/apis/backendconfig/v1beta1/zz_generated.openapi.go @@ -436,7 +436,7 @@ func schema_pkg_apis_backendconfig_v1beta1_SessionAffinityConfig(ref common.Refe return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "SessionAffinityConfig contains configuration for stickyness parameters.", + Description: "SessionAffinityConfig contains configuration for stickiness parameters.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "affinityType": { diff --git a/pkg/backends/features/doc.go b/pkg/backends/features/doc.go index 6656123161..ddbf2ec0eb 100644 --- a/pkg/backends/features/doc.go +++ b/pkg/backends/features/doc.go @@ -16,7 +16,7 @@ limitations under the License. // This package contains the implementations of backend service // features. -// For features that requrie non-GA compute API, please make sure to +// For features that require non-GA compute API, please make sure to // update `versionToFeatures` and `featuresFromServicePort()` in // features.go (upon both feature addition and promotion). It will make // sure the controller interacts with compute service using the proper diff --git a/pkg/backends/neg_linker.go b/pkg/backends/neg_linker.go index cf35fa5b9c..34c3b9fbea 100644 --- a/pkg/backends/neg_linker.go +++ b/pkg/backends/neg_linker.go @@ -72,7 +72,7 @@ func (nl *negLinker) Link(sp utils.ServicePort, groups []GroupKey) error { svcNegKey := fmt.Sprintf("%s/%s", sp.ID.Service.Namespace, negName) negUrl, ok := getNegUrlFromSvcneg(svcNegKey, group.Zone, nl.svcNegLister) if !ok { - klog.V(4).Infof("Falling back to use NEG API to retreive NEG url for NEG %q", negName) + klog.V(4).Infof("Falling back to use NEG API to retrieve NEG url for NEG %q", negName) neg, err := nl.negGetter.GetNetworkEndpointGroup(negName, group.Zone, version) if err != nil { return err diff --git a/pkg/backends/neg_linker_test.go b/pkg/backends/neg_linker_test.go index 56b0776882..3108f9a7b3 100644 --- a/pkg/backends/neg_linker_test.go +++ b/pkg/backends/neg_linker_test.go @@ -246,13 +246,13 @@ func TestMergeBackends(t *testing.T) { if tc.expectError && err == nil { t.Errorf("Expect err != nil, however got err == nil") } else if !tc.expectError && err != nil { - t.Errorf("Exptect err == nil, however got %v", err) + t.Errorf("Expect err == nil, however got %v", err) } if !tc.expectError { diffBackend := diffBackends(tc.expect, ret) if !diffBackend.isEqual() { - t.Errorf("Expect tc.expect == ret, howevever got, tc.expect = %v, ret = %v", tc.expect, ret) + t.Errorf("Expect tc.expect == ret, however got, tc.expect = %v, ret = %v", tc.expect, ret) } } }) diff --git a/pkg/cmconfig/config.go b/pkg/cmconfig/config.go index 7804a5481f..6a2ea5770c 100644 --- a/pkg/cmconfig/config.go +++ b/pkg/cmconfig/config.go @@ -24,7 +24,7 @@ const ( asmSkipNamespaces = "asm-skip-namespaces" ) -// NewConfig returns a Conifg instances with default values. +// NewConfig returns a Config instances with default values. 
func NewConfig() Config { return Config{EnableASM: false, ASMServiceNEGSkipNamespaces: []string{"kube-system", "istio-system"}} } diff --git a/pkg/cmconfig/controller.go b/pkg/cmconfig/controller.go index 561f206a48..cf64e12893 100644 --- a/pkg/cmconfig/controller.go +++ b/pkg/cmconfig/controller.go @@ -16,7 +16,7 @@ import ( ) // ConfigMapConfigController is the ConfigMap based config controller. -// If cmConfigModeEnabled set to true, it will load the config from configmap: configMapNamespace/configMapName and restart ingress controller if the config has any ligeal changes. +// If cmConfigModeEnabled set to true, it will load the config from configmap: configMapNamespace/configMapName and restart ingress controller if the config has any illegal changes. // If cmConfigModeEnabled set to false, it will return the default values for the configs. type ConfigMapConfigController struct { configMapNamespace string @@ -36,7 +36,7 @@ func NewConfigMapConfigController(kubeClient kubernetes.Interface, recorder reco if errors.IsNotFound(err) { klog.Infof("ConfigMapConfigController: Not found the configmap based config, using default config: %v", currentConfig) } else { - klog.Warningf("ConfigMapConfigController failed to load config from api server, using the defualt config. Error: %v", err) + klog.Warningf("ConfigMapConfigController failed to load config from api server, using the default config. Error: %v", err) } } else { if err := currentConfig.LoadValue(cm.Data); err != nil { @@ -109,7 +109,7 @@ func (c *ConfigMapConfigController) RecordEvent(eventtype, reason, message strin return true } -// RegisterInformer regjister the configmap based config controller handler to the configapInformer which will watch the target +// RegisterInformer register the configmap based config controller handler to the configMapInformer which will watch the target // configmap and send stop message to the stopCh if any valid change detected. func (c *ConfigMapConfigController) RegisterInformer(configMapInformer cache.SharedIndexInformer, cancel func()) { configMapInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -146,7 +146,7 @@ func (c *ConfigMapConfigController) processItem(obj interface{}, cancel func()) if errors.IsNotFound(err) { klog.Infof("ConfigMapConfigController: Not found the configmap based config, using default config: %v", config) } else { - klog.Warningf("ConfigMapConfigController failed to load config from api server, using the defualt config. Error: %v", err) + klog.Warningf("ConfigMapConfigController failed to load config from api server, using the default config. 
Error: %v", err) } } else { c.currentConfigMapObject = cm diff --git a/pkg/cmconfig/controller_test.go b/pkg/cmconfig/controller_test.go index 7ed0c7365f..bea5d0d8d5 100644 --- a/pkg/cmconfig/controller_test.go +++ b/pkg/cmconfig/controller_test.go @@ -46,52 +46,52 @@ func TestController(t *testing.T) { testcases := []struct { desc string defaultConfigMapData map[string]string - updateConifgMapData map[string]string + updateConfigMapData map[string]string wantConfig *Config wantUpdateConfig *Config wantStop bool wantLog string - donotWantLog string + doNotWantLog string }{ { desc: "No configMap config exists, controller should return default config", defaultConfigMapData: nil, - updateConifgMapData: nil, + updateConfigMapData: nil, wantConfig: &defaultConfig, wantUpdateConfig: nil, wantStop: false, wantLog: "Not found the configmap based config", - donotWantLog: "", + doNotWantLog: "", }, { desc: "Update a default value shouldn't trigger restart", defaultConfigMapData: nil, - updateConifgMapData: map[string]string{"enable-asm": "false"}, + updateConfigMapData: map[string]string{"enable-asm": "false"}, wantConfig: &defaultConfig, wantUpdateConfig: &defaultConfig, wantStop: false, wantLog: "Not found the configmap based config", - donotWantLog: "", + doNotWantLog: "", }, { desc: "update the default config should trigger a restart", defaultConfigMapData: map[string]string{"enable-asm": "false"}, - updateConifgMapData: map[string]string{"enable-asm": "true"}, + updateConfigMapData: map[string]string{"enable-asm": "true"}, wantConfig: &defaultConfig, wantUpdateConfig: &Config{EnableASM: true, ASMServiceNEGSkipNamespaces: []string{"kube-system", "istio-system"}}, wantStop: true, wantLog: "", - donotWantLog: "Not found the configmap based config", + doNotWantLog: "Not found the configmap based config", }, { - desc: "invalide config should give the default config", + desc: "invalid config should give the default config", defaultConfigMapData: map[string]string{"enable-asm": "TTTTT"}, - updateConifgMapData: nil, + updateConfigMapData: nil, wantConfig: &defaultConfig, wantUpdateConfig: nil, wantStop: false, wantLog: "unvalid value", - donotWantLog: "", + doNotWantLog: "", }, } for _, tc := range testcases { @@ -121,10 +121,10 @@ func TestController(t *testing.T) { stopped = true }) - if tc.updateConifgMapData != nil { + if tc.updateConfigMapData != nil { updateConfigMap := v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: testConfigMapName}, - Data: tc.updateConifgMapData} + Data: tc.updateConfigMapData} cmLister.Add(&updateConfigMap) fakeClient.CoreV1().ConfigMaps(testNamespace).Update(context.TODO(), &updateConfigMap, metav1.UpdateOptions{}) @@ -141,8 +141,8 @@ func TestController(t *testing.T) { t.Errorf("Missing log, got: %v, want: %v", logBuf.String(), tc.wantLog) } - if tc.donotWantLog != "" && strings.Contains(logBuf.String(), tc.donotWantLog) { - t.Errorf("Having not wanted log, got: %v, not want: %v", logBuf.String(), tc.donotWantLog) + if tc.doNotWantLog != "" && strings.Contains(logBuf.String(), tc.doNotWantLog) { + t.Errorf("Having not wanted log, got: %v, not want: %v", logBuf.String(), tc.doNotWantLog) } }) diff --git a/pkg/common/operator/frontendconfig_test.go b/pkg/common/operator/frontendconfig_test.go index 30effab252..2d60afac3e 100644 --- a/pkg/common/operator/frontendconfig_test.go +++ b/pkg/common/operator/frontendconfig_test.go @@ -26,7 +26,7 @@ func TestDoesIngressReferenceFrontendConfig(t *testing.T) { expected: false, }, { - desc: "ingress in 
different namspace", + desc: "ingress in different namespace", ing: test.IngressWithFrontendConfigOtherNamespace, expected: false, }, diff --git a/pkg/composite/gen.go b/pkg/composite/gen.go index f564e6f174..774609aa4e 100644 --- a/pkg/composite/gen.go +++ b/pkg/composite/gen.go @@ -241,7 +241,7 @@ type Backend struct { // Not available if the backend's balancingMode is CONNECTION. MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"` // Optional parameter to define a target capacity for the - // UTILIZATIONbalancing mode. The valid range is [0.0, 1.0]. For usage + // UTILIZATION balancing mode. The valid range is [0.0, 1.0]. For usage // guidelines, see Utilization balancing mode. MaxUtilization float64 `json:"maxUtilization,omitempty"` ForceSendFields []string `json:"-"` @@ -897,7 +897,7 @@ type CorsPolicy struct { AllowHeaders []string `json:"allowHeaders,omitempty"` // Specifies the content for the Access-Control-Allow-Methods header. AllowMethods []string `json:"allowMethods,omitempty"` - // Specifies the regualar expression patterns that match allowed + // Specifies the regular expression patterns that match allowed // origins. For regular expression grammar please see // github.com/google/re2/wiki/Syntax An origin is allowed if it matches // either an item in allowOrigins or an item in allowOriginRegexes. @@ -1034,7 +1034,7 @@ type ForwardingRule struct { // its filterLabels must match with corresponding labels provided in the // metadata. If multiple metadataFilters are specified, all of them need // to be satisfied in order to be considered a match. metadataFilters - // specified here will be applifed before those specified in the UrlMap + // specified here will be applied before those specified in the UrlMap // that this ForwardingRule references. metadataFilters only applies to // Loadbalancers that have their loadBalancingScheme set to // INTERNAL_SELF_MANAGED. diff --git a/pkg/context/context.go b/pkg/context/context.go index 71c881608a..c50c8f4746 100644 --- a/pkg/context/context.go +++ b/pkg/context/context.go @@ -285,12 +285,12 @@ func (ctx *ControllerContext) initEnableASM() { } klog.V(2).Infof("The supported DestinationRule group version is %s in group %s. 
Need to update as istio API graduates.", destinationRuleAPIVersion, destinationRuleGroup) - destrinationGVR := schema.GroupVersionResource{Group: destinationRuleGroup, Version: destinationRuleAPIVersion, Resource: destinationRulePlural} - drDynamicInformer := dynamicinformer.NewFilteredDynamicInformer(dynamicClient, destrinationGVR, ctx.Namespace, ctx.ResyncPeriod, + destinationGVR := schema.GroupVersionResource{Group: destinationRuleGroup, Version: destinationRuleAPIVersion, Resource: destinationRulePlural} + drDynamicInformer := dynamicinformer.NewFilteredDynamicInformer(dynamicClient, destinationGVR, ctx.Namespace, ctx.ResyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, nil) ctx.DestinationRuleInformer = drDynamicInformer.Informer() - ctx.DestinationRuleClient = dynamicClient.Resource(destrinationGVR) + ctx.DestinationRuleClient = dynamicClient.Resource(destinationGVR) ctx.ASMConfigController.RecordEvent("Normal", "ASMModeOn", fmt.Sprintf("NEG controller is running in ASM Mode with Istio API: %s.", destinationRuleAPIVersion)) ctx.ASMConfigController.SetASMReadyTrue() } diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 695c5de519..5a09332aec 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -835,7 +835,7 @@ func (lbc *LoadBalancerController) ensureFinalizer(ing *v1.Ingress) (*v1.Ingress return updatedIng, nil } -// frontendGCAlgorithm returns the naming scheme using which frontend resources needs to be cleanedup. +// frontendGCAlgorithm returns the naming scheme using which frontend resources needs to be cleaned-up. // This also returns a boolean to specify if we need to delete frontend resources. // GC path is // If ingress does not exist : v1 frontends and all backends diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index 4c6aa45f74..312699e11e 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -216,7 +216,7 @@ func TestNEGOnlyIngress(t *testing.T) { } } -// TestIngressCreateDeleteFinalizer asserts that `sync` will will not return an +// TestIngressCreateDeleteFinalizer asserts that `sync` will not return an // error for a good ingress config. It also tests garbage collection for // Ingresses that need to be deleted, and keep the ones that don't, depending // on whether Finalizer Adds and/or Removes are enabled. diff --git a/pkg/controller/translator/translator.go b/pkg/controller/translator/translator.go index 3ef5d1974f..cc163f9812 100644 --- a/pkg/controller/translator/translator.go +++ b/pkg/controller/translator/translator.go @@ -322,9 +322,9 @@ func (t *Translator) TranslateIngress(ing *v1.Ingress, systemDefaultBackend util return urlMap, errs } -// validateAndGetPaths will validate the path based on the specifed path type and will return the +// validateAndGetPaths will validate the path based on the specified path type and will return the // the path rules that should be used. If no path type is provided, the path type will be assumed -// to be ImplementationSpecific. If a non existent path type is provided, an error will be returned. +// to be ImplementationSpecific. If a nonexistent path type is provided, an error will be returned. 
func validateAndGetPaths(path v1.HTTPIngressPath) ([]string, error) { pathType := v1.PathTypeImplementationSpecific @@ -366,7 +366,7 @@ func validateAndModifyPrefixPathType(path v1.HTTPIngressPath) ([]string, error) return nil, fmt.Errorf("failed to validate prefix path type due to empty path") } - // The Ingress spec defines Prefx path "/" as matching all paths + // The Ingress spec defines Prefix path "/" as matching all paths if path.Path == "/" { return []string{"/*"}, nil } diff --git a/pkg/crd/crd.go b/pkg/crd/crd.go index 1c5947c2b7..dd0cead71b 100644 --- a/pkg/crd/crd.go +++ b/pkg/crd/crd.go @@ -126,7 +126,7 @@ func crd(meta *CRDMeta, namespacedScoped bool) *apiextensionsv1.CustomResourceDe klog.Errorf("Error adding simple validation for %v CRD(%s API): %v", meta.kind, v.name, err) } if validationSchema == nil { - klog.Errorf("No validation schema exists for for %v CRD(%s API)", meta.kind, v.name) + klog.Errorf("No validation schema exists for %v CRD(%s API)", meta.kind, v.name) } version := apiextensionsv1.CustomResourceDefinitionVersion{ Name: v.name, diff --git a/pkg/e2e/doc.go b/pkg/e2e/doc.go index 6da1473847..a28293d6c4 100644 --- a/pkg/e2e/doc.go +++ b/pkg/e2e/doc.go @@ -33,5 +33,5 @@ limitations under the License. // } // } // -// The Sandbox will handle resource isolation and reclaimation. +// The Sandbox will handle resource isolation and reclamation. package e2e diff --git a/pkg/e2e/fixtures.go b/pkg/e2e/fixtures.go index 9c7f9b17b7..59c1d3d721 100644 --- a/pkg/e2e/fixtures.go +++ b/pkg/e2e/fixtures.go @@ -406,13 +406,13 @@ func DeleteSubnet(s *Sandbox, name string) error { } // CreatePorterDeployment creates a Deployment with porter image. -func CreatePorterDeployment(s *Sandbox, name string, replics int32, version string) error { +func CreatePorterDeployment(s *Sandbox, name string, replicas int32, version string) error { env := fmt.Sprintf("SERVE_PORT_%d", porterPort) labels := map[string]string{"app": "porter", "version": version} deployment := apiappsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{Namespace: s.Namespace, Name: name}, Spec: apiappsv1.DeploymentSpec{ - Replicas: &replics, + Replicas: &replicas, Selector: &metav1.LabelSelector{MatchLabels: labels}, Template: apiv1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: labels}, @@ -461,7 +461,7 @@ func GetConfigMap(s *Sandbox, namespace, name string) (map[string]string, error) return cm.Data, nil } -// EnsureConfigMap ensures the namespace:name ConfigMap Data fieled, create if the target not exist. +// EnsureConfigMap ensures the namespace:name ConfigMap Data field, create if the target not exist. func EnsureConfigMap(s *Sandbox, namespace, name string, data map[string]string) error { cm := v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Data: data} _, err := s.f.Clientset.CoreV1().ConfigMaps(namespace).Update(context.TODO(), &cm, metav1.UpdateOptions{}) diff --git a/pkg/e2e/framework.go b/pkg/e2e/framework.go index b2d8ec9a7f..a0b12bec66 100644 --- a/pkg/e2e/framework.go +++ b/pkg/e2e/framework.go @@ -110,7 +110,7 @@ func NewFramework(config *rest.Config, options Options) *Framework { } f.statusManager = NewStatusManager(f) - // Preparing dynamic client if Istio:DestinationRule CRD exisits and matches the required version. + // Preparing dynamic client if Istio:DestinationRule CRD exists and matches the required version. // The client is used by the ASM e2e tests. 
destinationRuleCRD, err := f.crdClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), destinationRuleCRDName, metav1.GetOptions{}) if err != nil { @@ -127,8 +127,8 @@ func NewFramework(config *rest.Config, options Options) *Framework { if err != nil { klog.Fatalf("Failed to create Dynamic client: %v", err) } - destrinationGVR := schema.GroupVersionResource{Group: destinationRuleGroup, Version: destinationRuleAPIVersion, Resource: destinationRulePlural} - f.DestinationRuleClient = dynamicClient.Resource(destrinationGVR) + destinationGVR := schema.GroupVersionResource{Group: destinationRuleGroup, Version: destinationRuleAPIVersion, Resource: destinationRulePlural} + f.DestinationRuleClient = dynamicClient.Resource(destinationGVR) } } return f diff --git a/pkg/e2e/helpers.go b/pkg/e2e/helpers.go index a01c38609d..c45feb8939 100644 --- a/pkg/e2e/helpers.go +++ b/pkg/e2e/helpers.go @@ -476,7 +476,7 @@ func CheckGCLB(gclb *fuzz.GCLB, numForwardingRules int, numBackendServices int) return nil } -// CheckDistinctResponseHost issue GET call to the vip for 100 times, parse the reponses and calculate the number of distinct backends. +// CheckDistinctResponseHost issue GET call to the vip for 100 times, parse the responses and calculate the number of distinct backends. func CheckDistinctResponseHost(vip string, expectDistinctHosts int, tolerateTransientError bool) error { var errs []error const repeat = 100 @@ -581,7 +581,7 @@ func CheckNegs(negs map[meta.Key]*fuzz.NetworkEndpoints, expectHealthy bool, exp } // CheckNegStatus checks if the NEG Status annotation is presented and in the expected state -func CheckNegStatus(svc *v1.Service, expectSvcPors []string) (annotations.NegStatus, error) { +func CheckNegStatus(svc *v1.Service, expectSvcPorts []string) (annotations.NegStatus, error) { annotation, ok := svc.Annotations[annotations.NEGStatusKey] if !ok { return annotations.NegStatus{}, fmt.Errorf("service %s/%s does not have neg status annotation: %v", svc.Namespace, svc.Name, svc) @@ -592,7 +592,7 @@ func CheckNegStatus(svc *v1.Service, expectSvcPors []string) (annotations.NegSta return negStatus, fmt.Errorf("service %s/%s has invalid neg status annotation %q: %v", svc.Namespace, svc.Name, annotation, err) } - expectPorts := sets.NewString(expectSvcPors...) + expectPorts := sets.NewString(expectSvcPorts...) existingPorts := sets.NewString() for port := range negStatus.NetworkEndpointGroups { existingPorts.Insert(port) @@ -605,7 +605,7 @@ func CheckNegStatus(svc *v1.Service, expectSvcPors []string) (annotations.NegSta } // CheckNameInNegStatus checks if the NEG Status annotation is present and in the expected state -// The parameter expectedNegAttrs will map a port to a neg name. If the the neg name is empty, CheckNameInNegStatus expects +// The parameter expectedNegAttrs will map a port to a neg name. 
If the neg name is empty, CheckNameInNegStatus expects // that the name is autogenerated and will check it func CheckNameInNegStatus(svc *v1.Service, expectedNegAttrs map[string]string) (annotations.NegStatus, error) { annotation, ok := svc.Annotations[annotations.NEGStatusKey] @@ -856,7 +856,7 @@ func WaitForStandaloneNegDeletion(ctx context.Context, c cloud.Cloud, s *Sandbox }) } -// CheckDeletedNegCRs verifies that the provided neg list does not have negs that are associated with the provided neg atrributes +// CheckDeletedNegCRs verifies that the provided neg list does not have negs that are associated with the provided neg attributes func CheckDeletedNegCRs(s *Sandbox, negName, port string) (bool, error) { svcNeg, err := s.f.SvcNegClient.NetworkingV1beta1().ServiceNetworkEndpointGroups(s.Namespace).Get(context.Background(), negName, metav1.GetOptions{}) if err != nil { @@ -906,7 +906,7 @@ func CheckNegFinalizer(svcNeg negv1beta1.ServiceNetworkEndpointGroup) error { } // WaitForSvcNegErrorEvents waits for at least one of the possibles messages to be emitted on the -// namespace:svcName serice until timeout +// namespace:svcName service until timeout func WaitForSvcNegErrorEvents(s *Sandbox, svcName string, possibleMessages []string) error { svc, err := s.f.Clientset.CoreV1().Services(s.Namespace).Get(context.TODO(), svcName, metav1.GetOptions{}) if svc == nil || err != nil { @@ -1012,7 +1012,7 @@ func WaitForServiceAttachmentDeletion(s *Sandbox, saName, gceSAURL string) error }) } -// CheckServiceAttachmentCRDeletion verifes that the CR does not exist +// CheckServiceAttachmentCRDeletion verifies that the CR does not exist func CheckServiceAttachmentCRDeletion(s *Sandbox, saName string) bool { _, err := s.f.SAClient.Get(s.Namespace, saName) if err != nil { @@ -1026,7 +1026,7 @@ func CheckServiceAttachmentCRDeletion(s *Sandbox, saName string) bool { return false } -// CheckServiceAttachment verifes that the CR spec matches the GCE Service Attachment configuration and +// CheckServiceAttachment verifies that the CR spec matches the GCE Service Attachment configuration and // that the CR's Status was properly populated func CheckServiceAttachment(sa *fuzz.ServiceAttachment, cr *sav1.ServiceAttachment) (string, error) { if err := CheckServiceAttachmentFinalizer(cr); err != nil { @@ -1056,7 +1056,7 @@ func CheckServiceAttachment(sa *fuzz.ServiceAttachment, cr *sav1.ServiceAttachme return sa.GA.SelfLink, nil } -// CheckServiceAttachmentForwardingRule verfies that the forwarding rule used in the GCE Service Attachment creation +// CheckServiceAttachmentForwardingRule verifies that the forwarding rule used in the GCE Service Attachment creation // is the same one created by the Service referenced in the CR func CheckServiceAttachmentForwardingRule(s *Sandbox, c cloud.Cloud, cr *sav1.ServiceAttachment) error { @@ -1081,7 +1081,7 @@ func CheckServiceAttachmentForwardingRule(s *Sandbox, c cloud.Cloud, cr *sav1.Se return nil } -// CheckServiceAttachmentFinalizer verifes that the CR has the ServiceAttachment Finalizer +// CheckServiceAttachmentFinalizer verifies that the CR has the ServiceAttachment Finalizer func CheckServiceAttachmentFinalizer(cr *sav1.ServiceAttachment) error { finalizers := cr.GetFinalizers() if l := len(finalizers); l != 1 { @@ -1098,7 +1098,7 @@ func CheckServiceAttachmentFinalizer(cr *sav1.ServiceAttachment) error { func Truncate(key string) string { if len(key) > 62 { // GCE requires names to end with an alphanumeric, but allows - // characters like '-', so make sure the 
trucated name ends + // characters like '-', so make sure the truncated name ends // legally. return fmt.Sprintf("%v%v", key[:62], "0") } diff --git a/pkg/events/events.go b/pkg/events/events.go index ad6d55ca9c..047557206f 100644 --- a/pkg/events/events.go +++ b/pkg/events/events.go @@ -44,7 +44,7 @@ func (r RecorderProducerMock) Recorder(ns string) record.EventRecorder { return &record.FakeRecorder{} } -// GloablEventf records a Cluster level event not attached to a given object. +// GlobalEventf records a Cluster level event not attached to a given object. func GlobalEventf(r record.EventRecorder, eventtype, reason, messageFmt string, args ...interface{}) { // Using an empty ObjectReference to indicate no associated // resource. This apparently works, see the package @@ -57,7 +57,7 @@ func GlobalEventf(r record.EventRecorder, eventtype, reason, messageFmt string, var truncatedStringListMax = 2000 // TruncateStringList will render the list of items as a string, -// eliding elements with elipsis at the end if there are more than a +// eliding elements with ellipsis at the end if there are more than a // reasonable number of characters in the resulting string. This is // used to prevent accidentally dumping enormous strings into the // Event description. diff --git a/pkg/experimental/apis/workload/v1alpha1/doc.go b/pkg/experimental/apis/workload/v1alpha1/doc.go index 5a23ef89a1..a9f8c3c338 100644 --- a/pkg/experimental/apis/workload/v1alpha1/doc.go +++ b/pkg/experimental/apis/workload/v1alpha1/doc.go @@ -14,6 +14,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // TODO: Change group name to discovery.k8s.io after approved -// Package v1aplha1 is the v1aplha1 version of the API. +// Package v1alpha1 is the v1alpha1 version of the API. // +groupName=networking.gke.io package v1alpha1 diff --git a/pkg/experimental/workload/controller.go b/pkg/experimental/workload/controller.go index 10410458e9..cd84df2bf4 100644 --- a/pkg/experimental/workload/controller.go +++ b/pkg/experimental/workload/controller.go @@ -285,7 +285,7 @@ func (c *Controller) processService(key string) error { return nil } wlSelector := labels.Set(service.Spec.Selector).AsSelectorPreValidated() - // TODO: Use selector instaed of name to select endpointslices + // TODO: Use selector instead of name to select endpointslices // esSelector := labels.Set(map[string]string{ // discovery.LabelServiceName: service.Name, // discovery.LabelManagedBy: controllerName, @@ -295,7 +295,7 @@ func (c *Controller) processService(key string) error { // TODO: fix this // SA: https://github.com/kubernetes/kubernetes/blob/bdb99c8e0954c6b2d4c40233ded94455a343af73/pkg/controller/endpointslice/reconciler.go#L58:22 subsets := []discovery.Endpoint{} - listMachedWorkload(c.workloadLister, namespace, wlSelector, func(workload *workloadv1a1.Workload) { + listMatchedWorkload(c.workloadLister, namespace, wlSelector, func(workload *workloadv1a1.Workload) { subsets = append(subsets, workloadToEndpoint(workload, service)) }) // TODO: Consider Ready conditions of the workloads before putting them into the EndpointSlice. 
@@ -403,7 +403,7 @@ func getEndpointPortsFromServicePorts(svcPorts []corev1.ServicePort) []discovery return ret } -func listMachedWorkload( +func listMatchedWorkload( lister cache.Indexer, namespace string, selector labels.Selector, diff --git a/pkg/experimental/workload/controller_test.go b/pkg/experimental/workload/controller_test.go index 864fa52588..04ce639e28 100644 --- a/pkg/experimental/workload/controller_test.go +++ b/pkg/experimental/workload/controller_test.go @@ -90,7 +90,7 @@ func getEndpointSliceAddr(wlc *Controller, svc *corev1.Service, t *testing.T) [] return ret } -// addEndpointSliceToLister adds the EndpointSlice to the lister so the controller knows the existance of it. +// addEndpointSliceToLister adds the EndpointSlice to the lister so the controller knows the existence of it. func addEndpointSliceToLister(wlc *Controller, svc *corev1.Service, t *testing.T) { sliceName := endpointsliceName(svc.Name) eps, err := wlc.ctx.KubeClient.DiscoveryV1().EndpointSlices(svc.Namespace).Get( diff --git a/pkg/experimental/workload/daemon/daemon.go b/pkg/experimental/workload/daemon/daemon.go index 2f0afa0242..78e1418a82 100644 --- a/pkg/experimental/workload/daemon/daemon.go +++ b/pkg/experimental/workload/daemon/daemon.go @@ -96,7 +96,7 @@ func updateCR( select { case <-ticker.C: newStatus := generateHeartbeatStatus() - patch, err := preparePatchBytesforWorkloadStatus(oldStatus, newStatus) + patch, err := preparePatchBytesForWorkloadStatus(oldStatus, newStatus) if err != nil { klog.Errorf("failed to prepare the patch for workload resource: %+v", err) continue @@ -175,8 +175,8 @@ func OutputCredentials(credentials daemonutils.ClusterCredentials) { fmt.Println(string(ret)) } -// preparePatchBytesforWorkloadStatus generates patch bytes based on the old and new workload status -func preparePatchBytesforWorkloadStatus(oldStatus, newStatus workloadv1a1.WorkloadStatus) ([]byte, error) { +// preparePatchBytesForWorkloadStatus generates patch bytes based on the old and new workload status +func preparePatchBytesForWorkloadStatus(oldStatus, newStatus workloadv1a1.WorkloadStatus) ([]byte, error) { patchBytes, err := patch.StrategicMergePatchBytes( workloadv1a1.Workload{Status: oldStatus}, workloadv1a1.Workload{Status: newStatus}, diff --git a/pkg/experimental/workload/daemon/provider/gce/vm.go b/pkg/experimental/workload/daemon/provider/gce/vm.go index 3917100835..0dadad4b85 100644 --- a/pkg/experimental/workload/daemon/provider/gce/vm.go +++ b/pkg/experimental/workload/daemon/provider/gce/vm.go @@ -159,7 +159,7 @@ func (vm *VM) getCluster() (cluster *gkev1.Cluster, err error) { oauthClient, _, err := transport.NewHTTPClient(context.Background(), option.WithScopes(gkev1.CloudPlatformScope)) if err != nil { - klog.Errorf("failed to initalize http client: %+v", err) + klog.Errorf("failed to initialize http client: %+v", err) return } gkeSvc, err := gkev1.New(oauthClient) @@ -193,7 +193,7 @@ func (vm *VM) KubeConfig() (config *rest.Config, err error) { return } - // Get contianer master address and CA + // Get container master address and CA cluster, err := vm.getCluster() if err != nil { klog.Errorf("unable to get the cluster info: %+v", err) @@ -288,7 +288,7 @@ func NewVM() (vm *VM, err error) { if strings.HasPrefix(name, labelPrefix) { val, err := metadata.InstanceAttributeValue(name) if err != nil { - klog.Errorf("faild to fetch label %s: %+v", name, err) + klog.Errorf("failed to fetch label %s: %+v", name, err) } vm.vmLabels[name[prefixLen:]] = val } diff --git 
a/pkg/experimental/workload/daemon/utils/interface.go b/pkg/experimental/workload/daemon/utils/interface.go index 5d074aa582..f49225a61b 100644 --- a/pkg/experimental/workload/daemon/utils/interface.go +++ b/pkg/experimental/workload/daemon/utils/interface.go @@ -36,7 +36,7 @@ type WorkloadInfo interface { // ConnectionHelper provides the identity and config used to connect to the cluster type ConnectionHelper interface { - // Credentials contain the credentials used for the deamon to access the cluster. + // Credentials contain the credentials used for the daemon to access the cluster. // This is output to stdout, for Kubernetes clients to use. Credentials() (ClusterCredentials, error) // KubeConfig yields the config used to create Kubernetes clientset. diff --git a/pkg/experimental/workload/daemon/utils/kube-config.go b/pkg/experimental/workload/daemon/utils/kube-config.go index 93af356572..b55cebf4bc 100644 --- a/pkg/experimental/workload/daemon/utils/kube-config.go +++ b/pkg/experimental/workload/daemon/utils/kube-config.go @@ -30,7 +30,7 @@ func GenKubeConfigForKSA(clusterCa, clusterIP, clusterName, saName, accessToken var kubeConfig bytes.Buffer t, err := template.New("user").Parse(kubeConfigUserTemp) if err != nil { - klog.Fatalf("unablt to create KubeConfig template: %+v", err) + klog.Fatalf("unable to create KubeConfig template: %+v", err) } err = t.Execute(&kubeConfig, map[string]string{ "clusterCa": clusterCa, @@ -40,7 +40,7 @@ func GenKubeConfigForKSA(clusterCa, clusterIP, clusterName, saName, accessToken "accessToken": accessToken, }) if err != nil { - klog.Fatalf("unablt to execute KubeConfig template: %+v", err) + klog.Fatalf("unable to execute KubeConfig template: %+v", err) } return kubeConfig.Bytes() } @@ -56,7 +56,7 @@ func GenKubeConfigForUser(clusterCa, clusterIP, clusterName, authProvider string var kubeConfig bytes.Buffer t, err := template.New("user").Parse(kubeConfigUserTemp) if err != nil { - klog.Fatalf("unablt to create KubeConfig template: %+v", err) + klog.Fatalf("unable to create KubeConfig template: %+v", err) } err = t.Execute(&kubeConfig, map[string]string{ "clusterCa": clusterCa, @@ -66,7 +66,7 @@ func GenKubeConfigForUser(clusterCa, clusterIP, clusterName, authProvider string "authProvider": authProvider, }) if err != nil { - klog.Fatalf("unablt to execute KubeConfig template: %+v", err) + klog.Fatalf("unable to execute KubeConfig template: %+v", err) } return kubeConfig.Bytes() } diff --git a/pkg/firewalls/firewalls_test.go b/pkg/firewalls/firewalls_test.go index 9879dd1c1c..2835d0cd1f 100644 --- a/pkg/firewalls/firewalls_test.go +++ b/pkg/firewalls/firewalls_test.go @@ -295,7 +295,7 @@ func verifyFirewallRule(fwp *fakeFirewallsProvider, ruleName string, expectedNod } if !sets.NewString(f.TargetTags...).Equal(sets.NewString(expectedNodes...)) { - t.Errorf("target tags doesn't equal expected taget tags. Actual: %v, Expected: %v", f.TargetTags, expectedNodes) + t.Errorf("target tags doesn't equal expected target tags. 
Actual: %v, Expected: %v", f.TargetTags, expectedNodes) } if !sets.NewString(f.SourceRanges...).Equal(sets.NewString(expectedCIDRs...)) { diff --git a/pkg/forwardingrules/forwarding_rules_test.go b/pkg/forwardingrules/forwarding_rules_test.go index 19fe1f99ee..5ba8c90677 100644 --- a/pkg/forwardingrules/forwarding_rules_test.go +++ b/pkg/forwardingrules/forwarding_rules_test.go @@ -86,9 +86,9 @@ func TestGetForwardingRule(t *testing.T) { }, { existingFwdRules: []*composite.ForwardingRule{elbForwardingRule, ilbForwardingRule}, - getFwdRuleName: "non-existent-rule", + getFwdRuleName: "nonexistent-rule", expectedFwdRule: nil, - desc: "Get non existent forwarding rule", + desc: "Get nonexistent forwarding rule", }, } @@ -148,9 +148,9 @@ func TestDeleteForwardingRule(t *testing.T) { }, { existingFwdRules: []*composite.ForwardingRule{elbForwardingRule, ilbForwardingRule}, - deleteFwdRuleName: "non-existent", + deleteFwdRuleName: "nonexistent", shouldNotDeleteFwdRules: []*composite.ForwardingRule{elbForwardingRule, ilbForwardingRule}, - desc: "Delete non existent forwarding rule", + desc: "Delete nonexistent forwarding rule", }, } diff --git a/pkg/fuzz/gcp.go b/pkg/fuzz/gcp.go index 540aafa12d..635dc9688d 100644 --- a/pkg/fuzz/gcp.go +++ b/pkg/fuzz/gcp.go @@ -1014,7 +1014,7 @@ func GetServiceAttachment(ctx context.Context, c cloud.Cloud, saURL string) (*Se return &ServiceAttachment{GA: sa}, nil } -// CheckServiceAttachmentDeletion verfies that the Service Attachment does not exist +// CheckServiceAttachmentDeletion verifies that the Service Attachment does not exist func CheckServiceAttachmentDeletion(ctx context.Context, c cloud.Cloud, saURL string) (bool, error) { resID, err := cloud.ParseResourceURL(saURL) if err != nil { @@ -1023,7 +1023,7 @@ func CheckServiceAttachmentDeletion(ctx context.Context, c cloud.Cloud, saURL st } _, err = c.ServiceAttachments().Get(ctx, resID.Key) if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - klog.Infof("CheckServiceAttachmnetDeletion(), service attachment was successfully deleted") + klog.Infof("CheckServiceAttachmentDeletion(), service attachment was successfully deleted") return true, nil } return false, err diff --git a/pkg/fuzz/helpers.go b/pkg/fuzz/helpers.go index fbf41ac86b..add96ce035 100644 --- a/pkg/fuzz/helpers.go +++ b/pkg/fuzz/helpers.go @@ -506,7 +506,7 @@ func (f *FrontendConfigBuilder) Build() *frontendconfig.FrontendConfig { return f.frontendConfig.DeepCopy() } -// SetSslPolicy Sets ths SslPolicy on the FrontendConfig. +// SetSslPolicy sets the SslPolicy on the FrontendConfig.
func (f *FrontendConfigBuilder) SetSslPolicy(policy string) *FrontendConfigBuilder { f.frontendConfig.Spec.SslPolicy = &policy return f diff --git a/pkg/healthchecksl4/healthchecksl4.go b/pkg/healthchecksl4/healthchecksl4.go index 1f0992c8d0..c08bcbef6a 100644 --- a/pkg/healthchecksl4/healthchecksl4.go +++ b/pkg/healthchecksl4/healthchecksl4.go @@ -312,7 +312,7 @@ func newL4HealthCheck(name string, svcName types.NamespacedName, shared bool, pa Type: "HTTP", Description: desc, Scope: scope, - // Region will be omited by GCP API if Scope is set to Global + // Region will be omitted by GCP API if Scope is set to Global Region: region, } } diff --git a/pkg/healthchecksprovider/healthchecksprovider_test.go b/pkg/healthchecksprovider/healthchecksprovider_test.go index 48dd4b5399..5b48a74198 100644 --- a/pkg/healthchecksprovider/healthchecksprovider_test.go +++ b/pkg/healthchecksprovider/healthchecksprovider_test.go @@ -85,16 +85,16 @@ func TestGetHealthCheck(t *testing.T) { expectedHealthCheck: globalHealthCheck, }, { - desc: "Get non existent global health check", + desc: "Get nonexistent global health check", existingHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck}, - getHCName: "non-existent-hc", + getHCName: "nonexistent-hc", getHCScope: meta.Global, expectedHealthCheck: nil, }, { - desc: "Get non existent regional health check", + desc: "Get nonexistent regional health check", existingHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck}, - getHCName: "non-existent-hc", + getHCName: "nonexistent-hc", getHCScope: meta.Regional, expectedHealthCheck: nil, }, @@ -169,9 +169,9 @@ func TestDeleteHealthCheck(t *testing.T) { shouldNotDeleteHealthChecks: []*composite.HealthCheck{regionalHealthCheck}, }, { - desc: "Delete non existent healthCheck", + desc: "Delete nonexistent healthCheck", existingHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck}, - deleteHCName: "non-existent", + deleteHCName: "nonexistent", deleteHCScope: meta.Regional, shouldNotDeleteHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck}, }, diff --git a/pkg/instances/instances.go b/pkg/instances/instances.go index 1aeada67d4..2698cad374 100644 --- a/pkg/instances/instances.go +++ b/pkg/instances/instances.go @@ -394,7 +394,7 @@ func (i *Instances) Sync(nodes []string) (err error) { return nil } -// canonicalizeInstanceNeme take a GCE instance 'hostname' and break it down +// canonicalizeInstanceName take a GCE instance 'hostname' and break it down // to something that can be fed to the GCE API client library. Basically // this means reducing 'kubernetes-node-2.c.my-proj.internal' to // 'kubernetes-node-2' if necessary. diff --git a/pkg/instances/interfaces.go b/pkg/instances/interfaces.go index a736170d41..dc6fb2a095 100644 --- a/pkg/instances/interfaces.go +++ b/pkg/instances/interfaces.go @@ -49,7 +49,7 @@ type InstanceGroups interface { DeleteInstanceGroup(name, zone string) error ListInstanceGroups(zone string) ([]*compute.InstanceGroup, error) - // TODO: Refactor for modulatiry. + // TODO: Refactor for modularity. 
ListInstancesInInstanceGroup(name, zone string, state string) ([]*compute.InstanceWithNamedPorts, error) AddInstancesToInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error RemoveInstancesFromInstanceGroup(name, zone string, instanceRefs []*compute.InstanceReference) error diff --git a/pkg/l4lb/l4controller.go b/pkg/l4lb/l4controller.go index 138dcdeef0..6d60282877 100644 --- a/pkg/l4lb/l4controller.go +++ b/pkg/l4lb/l4controller.go @@ -272,7 +272,7 @@ func (l4c *L4Controller) processServiceDeletion(key string, svc *v1.Service) *lo // Following this order avoids a race condition when a service is changed from LoadBalancer type Internal to External. if err := updateServiceStatus(l4c.ctx, svc, &v1.LoadBalancerStatus{}); err != nil { l4c.ctx.Recorder(svc.Namespace).Eventf(svc, v1.EventTypeWarning, "DeleteLoadBalancer", - "Error reseting load balancer status to empty: %v", err) + "Error resetting load balancer status to empty: %v", err) result.Error = fmt.Errorf("failed to reset ILB status, err: %w", err) return result } diff --git a/pkg/l4lb/l4lbcommon.go b/pkg/l4lb/l4lbcommon.go index 43c635854c..dd413b8bb0 100644 --- a/pkg/l4lb/l4lbcommon.go +++ b/pkg/l4lb/l4lbcommon.go @@ -44,7 +44,7 @@ func computeNewAnnotationsIfNeeded(svc *v1.Service, newAnnotations map[string]st return newObjectMeta } -// mergeAnnotations merges the new set of l4lb resource annotations with the pre-existing service annotations. +// mergeAnnotations merges the new set of l4lb resource annotations with the preexisting service annotations. // Existing L4 resource annotation values will be replaced with the values in the new map. // This function is used by External and Internal L4 LB controllers. func mergeAnnotations(existing, lbAnnotations map[string]string, keysToRemove []string) map[string]string { diff --git a/pkg/l4lb/l4netlbcontroller.go b/pkg/l4lb/l4netlbcontroller.go index 98d2373156..7c5a4e57bf 100644 --- a/pkg/l4lb/l4netlbcontroller.go +++ b/pkg/l4lb/l4netlbcontroller.go @@ -372,7 +372,7 @@ func (lc *L4NetLBController) sync(key string) error { return fmt.Errorf("Failed to lookup L4 External LoadBalancer service for key %s : %w", key, err) } if !exists || svc == nil { - klog.V(3).Infof("Ignoring sync of non-existent service %s", key) + klog.V(3).Infof("Ignoring sync of nonexistent service %s", key) return nil } @@ -503,7 +503,7 @@ func (lc *L4NetLBController) garbageCollectRBSNetLB(key string, svc *v1.Service) if err := updateServiceStatus(lc.ctx, svc, &v1.LoadBalancerStatus{}); err != nil { lc.ctx.Recorder(svc.Namespace).Eventf(svc, v1.EventTypeWarning, "DeleteLoadBalancer", - "Error reseting L4 External LoadBalancer status to empty, err: %v", err) + "Error resetting L4 External LoadBalancer status to empty, err: %v", err) result.Error = fmt.Errorf("Failed to reset L4 External LoadBalancer status, err: %w", err) return result } diff --git a/pkg/l4lb/l4netlbcontroller_test.go b/pkg/l4lb/l4netlbcontroller_test.go index 6ddd905649..4f87442dac 100644 --- a/pkg/l4lb/l4netlbcontroller_test.go +++ b/pkg/l4lb/l4netlbcontroller_test.go @@ -172,7 +172,7 @@ func checkBackendService(lc *L4NetLBController, svc *v1.Service) error { igName := lc.namer.InstanceGroup() for _, b := range bs.Backends { if !strings.Contains(b.Group, igName) { - return fmt.Errorf("Backend Ingstance Group Link mismatch: %s != %s", igName, b.Group) + return fmt.Errorf("Backend Instance Group Link mismatch: %s != %s", igName, b.Group) } } ig, err := lc.ctx.Cloud.GetInstanceGroup(igName, testGCEZone) @@ -271,7 +271,7 @@ func 
validateAnnotationsDeleted(svc *v1.Service) error { } } if len(unexpectedKeys) != 0 { - return fmt.Errorf("Unexpeceted annotations: %v, Service annotations %v", unexpectedKeys, svc.Annotations) + return fmt.Errorf("Unexpected annotations: %v, Service annotations %v", unexpectedKeys, svc.Annotations) } return nil } @@ -868,7 +868,7 @@ func TestHealthCheckWhenExternalTrafficPolicyWasUpdated(t *testing.T) { hcNameNonShared := lc.namer.L4HealthCheck(svc.Namespace, svc.Name, false) err = updateAndAssertExternalTrafficPolicy(newSvc, lc, v1.ServiceExternalTrafficPolicyTypeLocal, hcNameNonShared) if err != nil { - t.Errorf("Error asserthing nonshared health check %v", err) + t.Errorf("Error asserting nonshared health check %v", err) } // delete shared health check if is created, update service to Cluster and // check that non-shared health check was created @@ -877,7 +877,7 @@ func TestHealthCheckWhenExternalTrafficPolicyWasUpdated(t *testing.T) { // Update ExternalTrafficPolicy to Cluster check if shared HC was created err = updateAndAssertExternalTrafficPolicy(newSvc, lc, v1.ServiceExternalTrafficPolicyTypeCluster, hcNameShared) if err != nil { - t.Errorf("Error asserthing shared health check %v", err) + t.Errorf("Error asserting shared health check %v", err) } newSvc.DeletionTimestamp = &metav1.Time{} updateNetLBService(lc, newSvc) diff --git a/pkg/loadbalancers/certificates.go b/pkg/loadbalancers/certificates.go index f5fa7087ea..34bff37195 100644 --- a/pkg/loadbalancers/certificates.go +++ b/pkg/loadbalancers/certificates.go @@ -62,7 +62,7 @@ func (l7 *L7) checkSSLCert() error { func (l7 *L7) createSslCertificates(existingCerts, translatorCerts []*composite.SslCertificate) ([]*composite.SslCertificate, error) { var result []*composite.SslCertificate - existingCertsMap := getMapfromCertList(existingCerts) + existingCertsMap := getMapFromCertList(existingCerts) // mapping of currently configured certs visitedCertMap := make(map[string]string) @@ -126,7 +126,7 @@ func (l7 *L7) createSslCertificates(existingCerts, translatorCerts []*composite. return result, nil } -func getMapfromCertList(certs []*composite.SslCertificate) map[string]*composite.SslCertificate { +func getMapFromCertList(certs []*composite.SslCertificate) map[string]*composite.SslCertificate { if len(certs) == 0 { return nil } @@ -200,7 +200,7 @@ func (l7 *L7) deleteOldSSLCerts() { if len(l7.oldSSLCerts) == 0 { return } - certsMap := getMapfromCertList(l7.sslCerts) + certsMap := getMapFromCertList(l7.sslCerts) for _, cert := range l7.oldSSLCerts { if !l7.namer.IsCertNameForLB(cert.Name) && !l7.namer.IsLegacySSLCert(cert.Name) { // retain cert if it is managed by GCE(non-ingress) @@ -221,7 +221,7 @@ func (l7 *L7) deleteOldSSLCerts() { // Returns true if the input array of certs is identical to the certs in the L7 config. 
// Returns false if there is any mismatch func (l7 *L7) compareCerts(certLinks []string) bool { - certsMap := getMapfromCertList(l7.sslCerts) + certsMap := getMapFromCertList(l7.sslCerts) if len(certLinks) != len(certsMap) { klog.V(4).Infof("Loadbalancer has %d certs, target proxy has %d certs", len(certsMap), len(certLinks)) return false diff --git a/pkg/loadbalancers/features/features.go b/pkg/loadbalancers/features/features.go index 9a57a2c9a6..b12347fe2e 100644 --- a/pkg/loadbalancers/features/features.go +++ b/pkg/loadbalancers/features/features.go @@ -62,7 +62,7 @@ var ( meta.Regional: {FeatureL7ILB}, } - // All of these fields must be filled in to allow L7ILBVersions() to work + // All of these fields must be filled in for L7ILBVersions() to work // TODO(shance) Remove this entirely l7IlbVersions = *NewResourceVersions() ) diff --git a/pkg/loadbalancers/forwarding_rules.go b/pkg/loadbalancers/forwarding_rules.go index 1c1a7f7af1..a75c5702ff 100644 --- a/pkg/loadbalancers/forwarding_rules.go +++ b/pkg/loadbalancers/forwarding_rules.go @@ -284,7 +284,7 @@ func (l4 *L4) ensureForwardingRule(bsLink string, options gce.ILBOptions, existi } frDiff := cmp.Diff(existingFwdRule, fr) // If the forwarding rule pointed to a backend service which does not match the controller naming scheme, - // that resouce could be leaked. It is not being deleted here because that is a user-managed resource. + // that resource could be leaked. It is not being deleted here because that is a user-managed resource. klog.V(2).Infof("ensureForwardingRule: forwarding rule changed - Existing - %+v\n, New - %+v\n, Diff(-existing, +new) - %s\n. Deleting existing forwarding rule.", existingFwdRule, fr, frDiff) if err = l4.forwardingRules.Delete(existingFwdRule.Name); err != nil { return nil, err diff --git a/pkg/loadbalancers/l4_test.go b/pkg/loadbalancers/l4_test.go index d0d8636413..d8d2710a69 100644 --- a/pkg/loadbalancers/l4_test.go +++ b/pkg/loadbalancers/l4_test.go @@ -667,7 +667,7 @@ func TestHealthCheckFirewallDeletionWithNetLB(t *testing.T) { // Create NetLB Service netlbSvc := test.NewL4NetLBRBSService(8080) l4NetLB := NewL4NetLB(netlbSvc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - // make sure both ilb and netlb use the same l4 healtcheck instance + // make sure both ilb and netlb use the same l4 healthcheck instance l4NetLB.healthChecks = l4.healthChecks // create netlb resources @@ -1534,7 +1534,7 @@ func verifyHealthCheckFirewall(l4 *L4, nodeNames []string) error { hcFwName := l4.namer.L4HealthCheckFirewall(l4.Service.Namespace, l4.Service.Name, isSharedHC) hcFwDesc, err := utils.MakeL4LBFirewallDescription(utils.ServiceKeyFunc(l4.Service.Namespace, l4.Service.Name), "", meta.VersionGA, isSharedHC) if err != nil { - return fmt.Errorf("failed to calculate decsription for health check for service %v, error %v", l4.Service, err) + return fmt.Errorf("failed to calculate description for health check for service %v, error %v", l4.Service, err) } return verifyFirewall(l4, nodeNames, hcFwName, hcFwDesc, gce.L4LoadBalancerSrcRanges()) diff --git a/pkg/loadbalancers/l4netlb_test.go b/pkg/loadbalancers/l4netlb_test.go index eeea81c8f1..562f9ab0e4 100644 --- a/pkg/loadbalancers/l4netlb_test.go +++ b/pkg/loadbalancers/l4netlb_test.go @@ -167,7 +167,7 @@ func TestHealthCheckFirewallDeletionWithILB(t *testing.T) { l4NetLB := NewL4NetLB(netlbSvc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - // make sure both ilb and netlb use the same l4 healtcheck instance + // make sure both ilb and 
netlb use the same l4 healthcheck instance l4NetLB.healthChecks = l4ilb.healthChecks // create netlb resources @@ -440,7 +440,7 @@ func TestEnsureNetLBFirewallDestinations(t *testing.T) { } if reflect.DeepEqual(oldDestinationRanges, updatedFirewall.DestinationRanges) { - t.Errorf("DestinationRanges is not udpated. oldDestinationRanges:%v, updatedFirewall.DestinationRanges:%v", oldDestinationRanges, updatedFirewall.DestinationRanges) + t.Errorf("DestinationRanges is not updated. oldDestinationRanges:%v, updatedFirewall.DestinationRanges:%v", oldDestinationRanges, updatedFirewall.DestinationRanges) } } diff --git a/pkg/loadbalancers/l7s_test.go b/pkg/loadbalancers/l7s_test.go index ff68296160..49137dd42f 100644 --- a/pkg/loadbalancers/l7s_test.go +++ b/pkg/loadbalancers/l7s_test.go @@ -267,7 +267,7 @@ func TestDoNotGCWantedLB(t *testing.T) { } } -// This should not leak at all, but verfies existing behavior +// This should not leak at all, but verifies existing behavior // TODO: remove this test after the GC resource leaking is fixed. func TestGCToLeakLB(t *testing.T) { t.Parallel() diff --git a/pkg/loadbalancers/loadbalancers_test.go b/pkg/loadbalancers/loadbalancers_test.go index 624ce6c221..9bf110819a 100644 --- a/pkg/loadbalancers/loadbalancers_test.go +++ b/pkg/loadbalancers/loadbalancers_test.go @@ -921,7 +921,7 @@ func verifyCertAndProxyLink(expectCerts map[string]string, expectCertsProxy map[ } } - // httpsproxy needs to contain only the certs in expectCerts, nothing more, nothing less + // https proxy needs to contain only the certs in expectCerts, nothing more, nothing less key, err = composite.CreateKey(j.fakeGCE, j.feNamer.TargetProxy(namer_util.HTTPSProtocol), defaultScope) if err != nil { t.Fatal(err) @@ -1033,7 +1033,7 @@ func TestCreateBothLoadBalancers(t *testing.T) { } // Test StaticIP annotation behavior. -// When a non-existent StaticIP value is specified, ingress creation must fail. +// When a nonexistent StaticIP value is specified, ingress creation must fail. func TestStaticIP(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() @@ -1052,7 +1052,7 @@ func TestStaticIP(t *testing.T) { } if _, err := j.pool.Ensure(lbInfo); err == nil { - t.Fatalf("expected error ensuring ingress with non-existent static ip") + t.Fatalf("expected error ensuring ingress with nonexistent static ip") } // Create static IP err := j.fakeGCE.ReserveGlobalAddress(&compute.Address{Name: "teststaticip", Address: "1.2.3.4"}) diff --git a/pkg/loadbalancers/target_proxies.go b/pkg/loadbalancers/target_proxies.go index f478df67d9..b688a726bb 100644 --- a/pkg/loadbalancers/target_proxies.go +++ b/pkg/loadbalancers/target_proxies.go @@ -28,7 +28,7 @@ import ( ) const ( - // Every target https proxy accepts upto 10 ssl certificates. + // Every target https proxy accepts up to 10 ssl certificates. 
TargetProxyCertLimit = 10 ) diff --git a/pkg/metrics/features.go b/pkg/metrics/features.go index 71f8c6ecdc..1e2bc039a9 100644 --- a/pkg/metrics/features.go +++ b/pkg/metrics/features.go @@ -101,7 +101,7 @@ const ( customNamedNeg = feature("CustomNamedNEG") // negInSuccess feature specifies that syncers were created for the Neg negInSuccess = feature("NegInSuccess") - // negInError feature specifies that an error occuring in ensuring Neg Syncer + // negInError feature specifies that an error occurred in ensuring Neg Syncer negInError = feature("NegInError") l4ILBService = feature("L4ILBService") diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 2a4645c665..ec10a82042 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -298,7 +298,7 @@ func (im *ControllerMetrics) DeleteL4NetLBService(svcKey string) { } // SetServiceAttachment adds sa state to the map to be counted during metrics computation. -// SetServiceAttachment implments PSCMetricsCollector. +// SetServiceAttachment implements PSCMetricsCollector. func (im *ControllerMetrics) SetServiceAttachment(saKey string, state pscmetrics.PSCState) { im.Lock() defer im.Unlock() @@ -310,7 +310,7 @@ func (im *ControllerMetrics) SetServiceAttachment(saKey string, state pscmetrics } // DeleteServiceAttachment removes sa state to the map to be counted during metrics computation. -// DeleteServiceAttachment implments PSCMetricsCollector. +// DeleteServiceAttachment implements PSCMetricsCollector. func (im *ControllerMetrics) DeleteServiceAttachment(saKey string) { im.Lock() defer im.Unlock() @@ -319,7 +319,7 @@ func (im *ControllerMetrics) DeleteServiceAttachment(saKey string) { } // SetService adds the service to the map to be counted during metrics computation. -// SetService implments PSCMetricsCollector. +// SetService implements PSCMetricsCollector. func (im *ControllerMetrics) SetService(serviceKey string) { im.Lock() defer im.Unlock() @@ -331,7 +331,7 @@ func (im *ControllerMetrics) SetService(serviceKey string) { } // DeleteService removes the service from the map to be counted during metrics computation. -// DeleteService implments PSCMetricsCollector. +// DeleteService implements PSCMetricsCollector. func (im *ControllerMetrics) DeleteService(serviceKey string) { im.Lock() defer im.Unlock() diff --git a/pkg/metrics/metrics_test.go b/pkg/metrics/metrics_test.go index c5def5371b..760de0308b 100644 --- a/pkg/metrics/metrics_test.go +++ b/pkg/metrics/metrics_test.go @@ -518,7 +518,7 @@ var ( cookieAffinity, backendConnectionDraining}, }, { - "non-existent pre-shared cert", + "nonexistent pre-shared cert", &v1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Namespace: defaultNamespace, diff --git a/pkg/neg/controller.go b/pkg/neg/controller.go index b04d5ddb85..15be81b184 100644 --- a/pkg/neg/controller.go +++ b/pkg/neg/controller.go @@ -652,7 +652,7 @@ func (c *Controller) mergeDefaultBackendServicePortInfoMap(key string, service * } // getCSMPortInfoMap gets the PortInfoMap for service and DestinationRules. -// If enableCSM = true, the controller will create NEGs for every port/subsets combinations for the DestinaitonRules. +// If enableCSM = true, the controller will create NEGs for every port/subsets combinations for the DestinationRules. // It will also create NEGs for all the ports of the service that referred by the DestinationRules.
func (c *Controller) getCSMPortInfoMap(namespace, name string, service *apiv1.Service) (negtypes.PortInfoMap, negtypes.PortInfoMap, error) { destinationRulesPortInfoMap := make(negtypes.PortInfoMap) @@ -951,12 +951,12 @@ func gatherPortMappingFromService(svc *apiv1.Service) negtypes.SvcPortTupleSet { } // getDestinationRulesFromStore returns all DestinationRules that referring service svc. -// Please notice that a DestionationRule can point to a service in a different namespace. +// Please notice that a DestinationRule can point to a service in a different namespace. func getDestinationRulesFromStore(store cache.Store, svc *apiv1.Service, logger klog.Logger) (drs map[apimachinerytypes.NamespacedName]*istioV1alpha3.DestinationRule) { drs = make(map[apimachinerytypes.NamespacedName]*istioV1alpha3.DestinationRule) for _, obj := range store.List() { - drUnstructed := obj.(*unstructured.Unstructured) - targetServiceNamespace, drHost, dr, err := castToDestinationRule(drUnstructed) + drUnstructured := obj.(*unstructured.Unstructured) + targetServiceNamespace, drHost, dr, err := castToDestinationRule(drUnstructured) if err != nil { logger.Error(err, "Failed to cast Unstructured DestinationRule to DestinationRule") continue @@ -964,7 +964,7 @@ func getDestinationRulesFromStore(store cache.Store, svc *apiv1.Service, logger if targetServiceNamespace == svc.Namespace && drHost == svc.Name { // We want to return DestinationRule namespace but not the target service namespace. - drs[apimachinerytypes.NamespacedName{Namespace: drUnstructed.GetNamespace(), Name: drUnstructed.GetName()}] = dr + drs[apimachinerytypes.NamespacedName{Namespace: drUnstructured.GetNamespace(), Name: drUnstructured.GetName()}] = dr } } return diff --git a/pkg/neg/controller_test.go b/pkg/neg/controller_test.go index dda865f7d3..80bbf7ac76 100644 --- a/pkg/neg/controller_test.go +++ b/pkg/neg/controller_test.go @@ -401,7 +401,7 @@ func TestEnableNEGServiceWithL4ILB(t *testing.T) { t.Fatalf("Failed to update service lister: %v", err) } if err = controller.processService(svcKey); err != nil { - t.Fatalf("Failed to process updated L4 ILB srvice: %v", err) + t.Fatalf("Failed to process updated L4 ILB service: %v", err) } expectedPortInfoMap = negtypes.NewPortInfoMapForVMIPNEG(testServiceNamespace, testServiceName, controller.l4Namer, true) @@ -1097,7 +1097,7 @@ func TestMergeCSMPortInfoMap(t *testing.T) { defer controller.stop() n1s1 := newTestServiceCus(t, controller, "namespace1", "service1", []int32{80, 90}) n2s1 := newTestServiceCus(t, controller, "namespace2", "service1", []int32{90}) - ds1, usDr1 := newTestDestinationRule(t, controller, "namespac2", "test-destination-rule", "service1.namespace1", []string{"v1", "v2"}) + ds1, usDr1 := newTestDestinationRule(t, controller, "name-space2", "test-destination-rule", "service1.namespace1", []string{"v1", "v2"}) if err := controller.destinationRuleLister.Add(usDr1); err != nil { t.Fatal(err) } @@ -1354,7 +1354,7 @@ func TestEnqueueEndpoints(t *testing.T) { if list := informer.GetIndexer().List(); len(list) != 1 { t.Errorf("Got list - %v of size %d, want 1 element", list, len(list)) } - t.Logf("Checking for enqueue of endopoint create event") + t.Logf("Checking for enqueue of endpoint create event") ensureEndpointEnqueue(t, tc.expectedKey, controller) }) } @@ -1513,7 +1513,7 @@ func validateDestinationRuleAnnotationWithPortInfoMap(t *testing.T, usdr *unstru } // validateServiceStateAnnotationWithPortNameMap validates all aspects of the service annotation -// and also checks for custon 
names if specified in given portNameMap +// and also checks for custom names if specified in given portNameMap func validateServiceStateAnnotationWithPortNameMap(t *testing.T, svc *apiv1.Service, svcPorts []int32, namer negtypes.NetworkEndpointGroupNamer, portNameMap map[int32]string) { negStatus := validateServiceStateAnnotationExceptNames(t, svc, svcPorts) diff --git a/pkg/neg/manager.go b/pkg/neg/manager.go index 16b76031c9..720492e71c 100644 --- a/pkg/neg/manager.go +++ b/pkg/neg/manager.go @@ -200,7 +200,7 @@ func (manager *syncerManager) EnsureSyncers(namespace, name string, newPorts neg syncer, ok := manager.syncerMap[syncerKey] if !ok { - // To ensure that a NEG CR always exists during the lifecyle of a NEG, do not create a + // To ensure that a NEG CR always exists during the lifecycle of a NEG, do not create a // syncer for the NEG until the NEG CR is successfully created. This will reduce the // possibility of invalid states and reduces complexity of garbage collection if err := manager.ensureSvcNegCR(key, portInfo); err != nil { diff --git a/pkg/neg/manager_test.go b/pkg/neg/manager_test.go index ba097f349e..7286664205 100644 --- a/pkg/neg/manager_test.go +++ b/pkg/neg/manager_test.go @@ -1011,7 +1011,7 @@ func TestNegCRDuplicateCreations(t *testing.T) { portInfo := portInfoMap[negtypes.PortInfoMapKey{ServicePort: svcTuple1.Port, Subset: ""}] checkNegCR(t, &negs.Items[0], svcKey, svc1.UID, portInfo) // If update was unnecessary, the resource version should not change. - // If upate was necessary, the same CR should be used for an update so the resource + // If update was necessary, the same CR should be used for an update so the resource // version should be unchanged. API server will change resource version. if negs.Items[0].ResourceVersion != testNeg.ResourceVersion { t.Errorf("neg resource version should not be updated") @@ -1620,7 +1620,7 @@ func checkNegCR(t *testing.T, neg *negv1beta1.ServiceNetworkEndpointGroup, svcKe } if *ownerRefs[0].BlockOwnerDeletion != false { - t.Errorf("Expected neg owner ref not block owner deltion") + t.Errorf("Expected neg owner ref not block owner deletion") } } diff --git a/pkg/neg/readiness/poller.go b/pkg/neg/readiness/poller.go index fae529dca8..b367d544ce 100644 --- a/pkg/neg/readiness/poller.go +++ b/pkg/neg/readiness/poller.go @@ -39,7 +39,7 @@ const ( // retryDelay is the delay to retry health status polling. // GCE NEG API RPS quota is rate limited per every 100 seconds. - // Make this retry delay to match the ratelimiting interval. + // Make this retry delay to match the rate limiting interval. // More detail: https://cloud.google.com/compute/docs/api-rate-limits retryDelay = 100 * time.Second ) diff --git a/pkg/neg/readiness/poller_test.go b/pkg/neg/readiness/poller_test.go index 3837160aee..312efae367 100644 --- a/pkg/neg/readiness/poller_test.go +++ b/pkg/neg/readiness/poller_test.go @@ -561,7 +561,7 @@ func TestProcessHealthStatus(t *testing.T) { // processHealthStatus should not crash when pollMap does not have corresponding key. 
retry, err := poller.processHealthStatus(key, res) if retry != false { - t.Errorf("exepect retry == false, but got %v", retry) + t.Errorf("expect retry == false, but got %v", retry) } if err != nil { t.Errorf("expect err == nil, but got %v", err) diff --git a/pkg/neg/readiness/reflector.go b/pkg/neg/readiness/reflector.go index 4b0c6abd90..7789910711 100644 --- a/pkg/neg/readiness/reflector.go +++ b/pkg/neg/readiness/reflector.go @@ -48,7 +48,7 @@ const ( // negNotReadyReason is the pod condition reason when pod is not healthy in NEG negNotReadyReason = "LoadBalancerNegNotReady" // unreadyTimeout is the timeout for health status feedback for pod readiness. If load balancer health - // check is still not showing as Healthy for long than the time out since the pod is created. Skip wating and mark + // check is still not showing as Healthy for longer than the timeout since the pod is created. Skip waiting and mark // the pod as load balancer ready. // This is a fail-safe in case that should be longer than any reasonable amount of time for the healthy infrastructure catch up. unreadyTimeout = 10 * time.Minute @@ -177,7 +177,7 @@ func (r *readinessReflector) syncPod(podKey string, neg, backendService *meta.Ke func (r *readinessReflector) getExpectedNegCondition(pod *v1.Pod, neg, backendService *meta.Key) v1.PodCondition { expectedCondition := v1.PodCondition{Type: shared.NegReadinessGate} if pod == nil { - expectedCondition.Message = fmt.Sprintf("Unkown status for unkown pod.") + expectedCondition.Message = fmt.Sprintf("Unknown status for unknown pod.") return expectedCondition } diff --git a/pkg/neg/readiness/utils_test.go b/pkg/neg/readiness/utils_test.go index 45df7f527b..570dd08211 100644 --- a/pkg/neg/readiness/utils_test.go +++ b/pkg/neg/readiness/utils_test.go @@ -100,7 +100,7 @@ func TestNegReadinessConditionStatus(t *testing.T) { } { condition, ok := NegReadinessConditionStatus(tc.pod) if tc.expectCondition != condition { - t.Errorf("For test case %q, expect condiion %v, but got %v", tc.desc, tc.expectCondition, condition) + t.Errorf("For test case %q, expect condition %v, but got %v", tc.desc, tc.expectCondition, condition) } if tc.expectFound != ok { diff --git a/pkg/neg/syncers/endpoints_calculator.go b/pkg/neg/syncers/endpoints_calculator.go index 37e51f2f21..bf73dc6a03 100644 --- a/pkg/neg/syncers/endpoints_calculator.go +++ b/pkg/neg/syncers/endpoints_calculator.go @@ -112,7 +112,7 @@ func (l *LocalL4ILBEndpointsCalculator) CalculateEndpoints(eds []types.Endpoints // ClusterL4ILBEndpointGetter implements the NetworkEndpointsCalculator interface. // It exposes methods to calculate Network endpoints for GCE_VM_IP NEGs when the service // uses "ExternalTrafficPolicy: Cluster" mode This is the default mode. -// In this mode, the endpoints of the NEG are calculated by selecting nodes at random. Upto 25(subset size limit in this +// In this mode, the endpoints of the NEG are calculated by selecting nodes at random. Up to 25(subset size limit in this // mode) are selected. type ClusterL4ILBEndpointsCalculator struct { // nodeLister is used for listing all the nodes in the cluster when calculating the subset.
diff --git a/pkg/neg/syncers/endpoints_calculator_test.go b/pkg/neg/syncers/endpoints_calculator_test.go index acba48b202..71bb22dc90 100644 --- a/pkg/neg/syncers/endpoints_calculator_test.go +++ b/pkg/neg/syncers/endpoints_calculator_test.go @@ -257,7 +257,7 @@ func deleteNodes(t *testing.T, nodeNames []string, nodeIndexer cache.Indexer) { for _, nodeName := range nodeNames { node, exists, err := nodeIndexer.GetByKey(nodeName) if err != nil || !exists { - t.Errorf("Could not lookuo node %q, err - %v", nodeName, err) + t.Errorf("Could not lookup node %q, err - %v", nodeName, err) continue } if err := nodeIndexer.Delete(node); err != nil { diff --git a/pkg/neg/syncers/retry_handler.go b/pkg/neg/syncers/retry_handler.go index a1fb14fdc9..2d9c0bf7b6 100644 --- a/pkg/neg/syncers/retry_handler.go +++ b/pkg/neg/syncers/retry_handler.go @@ -59,7 +59,7 @@ func NewDelayRetryHandler(retryFunc func(), backoff backoffHandler) *backoffRetr } // Retry triggers retry with back off -// At any time, there is only one onging retry allowed. +// At any time, there is only one ongoing retry allowed. func (h *backoffRetryHandler) Retry() error { h.stateLock.Lock() defer h.stateLock.Unlock() diff --git a/pkg/neg/syncers/subsets_test.go b/pkg/neg/syncers/subsets_test.go index c739905c87..466d3cd8b1 100644 --- a/pkg/neg/syncers/subsets_test.go +++ b/pkg/neg/syncers/subsets_test.go @@ -77,7 +77,7 @@ func TestFewerNodes(t *testing.T) { } } -// Tests the case where there is unever distribution of nodes in various zones. The goal is to select as many nodes as +// Tests the case where there is uneven distribution of nodes in various zones. The goal is to select as many nodes as // possible in all cases. func TestUnevenNodesInZones(t *testing.T) { t.Parallel() diff --git a/pkg/neg/syncers/syncer.go b/pkg/neg/syncers/syncer.go index b74291cd31..37a94f3239 100644 --- a/pkg/neg/syncers/syncer.go +++ b/pkg/neg/syncers/syncer.go @@ -81,7 +81,7 @@ func (s *syncer) Start() error { return fmt.Errorf("NEG syncer for %s is shutting down. 
", s.NegSyncerKey.String()) } - s.logger.V(2).Info("Starting NEG syncer for service port", "negSynckerKey", s.NegSyncerKey.String()) + s.logger.V(2).Info("Starting NEG syncer for service port", "negSyncerKey", s.NegSyncerKey.String()) s.init() go func() { for { @@ -90,16 +90,16 @@ func (s *syncer) Start() error { err := s.core.sync() if err != nil { delay, retryErr := s.backoff.NextRetryDelay() - retryMesg := "" + retryMsg := "" if retryErr == ErrRetriesExceeded { - retryMesg = "(will not retry)" + retryMsg = "(will not retry)" } else { retryCh = s.clock.After(delay) - retryMesg = "(will retry)" + retryMsg = "(will retry)" } if svc := getService(s.serviceLister, s.Namespace, s.Name); svc != nil { - s.recorder.Eventf(svc, apiv1.EventTypeWarning, "SyncNetworkEndpointGroupFailed", "Failed to sync NEG %q %s: %v", s.NegSyncerKey.NegName, retryMesg, err) + s.recorder.Eventf(svc, apiv1.EventTypeWarning, "SyncNetworkEndpointGroupFailed", "Failed to sync NEG %q %s: %v", s.NegSyncerKey.NegName, retryMsg, err) } } else { s.backoff.ResetRetryDelay() @@ -111,7 +111,7 @@ func (s *syncer) Start() error { s.stateLock.Lock() s.shuttingDown = false s.stateLock.Unlock() - s.logger.V(2).Info("Stopping NEG syncer", "negSynckerKey", s.NegSyncerKey.String()) + s.logger.V(2).Info("Stopping NEG syncer", "negSyncerKey", s.NegSyncerKey.String()) return } case <-retryCh: @@ -133,7 +133,7 @@ func (s *syncer) Stop() { s.stateLock.Lock() defer s.stateLock.Unlock() if !s.stopped { - s.logger.V(2).Info("Stopping NEG syncer for service port", "negSynckerKey", s.NegSyncerKey.String()) + s.logger.V(2).Info("Stopping NEG syncer for service port", "negSyncerKey", s.NegSyncerKey.String()) s.stopped = true s.shuttingDown = true close(s.syncCh) @@ -142,7 +142,7 @@ func (s *syncer) Stop() { func (s *syncer) Sync() bool { if s.IsStopped() { - s.logger.Info("NEG syncer is already stopped.", "negSynckerKey", s.NegSyncerKey.String()) + s.logger.Info("NEG syncer is already stopped.", "negSyncerKey", s.NegSyncerKey.String()) return false } select { diff --git a/pkg/neg/syncers/transaction_test.go b/pkg/neg/syncers/transaction_test.go index 3fbef9f631..9d944c36f3 100644 --- a/pkg/neg/syncers/transaction_test.go +++ b/pkg/neg/syncers/transaction_test.go @@ -1404,7 +1404,7 @@ func TestUnknownNodes(t *testing.T) { } if !reflect.DeepEqual(expectedEndpoints, out) { - t.Errorf("endpoints were modified after syncInteral:\ngot %+v,\n expected %+v", out, expectedEndpoints) + t.Errorf("endpoints were modified after syncInternal:\ngot %+v,\n expected %+v", out, expectedEndpoints) } } } @@ -1628,7 +1628,7 @@ func checkNegDescription(t *testing.T, syncer *transactionSyncer, desc string) { } } -// checkCondition looks for the condition of the specified type and validates it has has the expectedStatus. +// checkCondition looks for the condition of the specified type and validates it has the expectedStatus. // It will also validate that the transition timestamp is updated as expected, which is specified by expectTransitionTSUpdate. func checkCondition(t *testing.T, conditions []negv1beta1.Condition, conditionType string, previousTS metav1.Time, expectedStatus corev1.ConditionStatus, expectTransitionTSUpdate bool) metav1.Time { var condition negv1beta1.Condition @@ -1704,7 +1704,7 @@ func createNegCR(testNegName string, creationTS metav1.Time, populateInitialized return neg } -// checkNegCR validates the the NegObjectReferences and the LastSyncTime. 
It will not validate the conditions fields but ensures at most 2 conditions exist +// checkNegCR validates the NegObjectReferences and the LastSyncTime. It will not validate the conditions fields but ensures at most 2 conditions exist func checkNegCR(t *testing.T, negCR *negv1beta1.ServiceNetworkEndpointGroup, previousLastSyncTime metav1.Time, expectZones sets.String, expectedNegRefs map[string]negv1beta1.NegObjectReference, expectSyncTimeUpdate, expectErr bool) { if expectSyncTimeUpdate && !previousLastSyncTime.Before(&negCR.Status.LastSyncTime) { t.Errorf("Expected Neg CR to have an updated LastSyncTime") diff --git a/pkg/neg/syncers/utils.go b/pkg/neg/syncers/utils.go index f9a60e5db4..c9293e539a 100644 --- a/pkg/neg/syncers/utils.go +++ b/pkg/neg/syncers/utils.go @@ -205,7 +205,7 @@ func ensureNetworkEndpointGroup(svcNamespace, svcName, negName, zone, negService var err error neg, err = cloud.GetNetworkEndpointGroup(negName, zone, version) if err != nil { - klog.Errorf("Error while retriving %q in zone %q: %v after initialization", negName, zone, err) + klog.Errorf("Error while retrieving %q in zone %q: %v after initialization", negName, zone, err) return negRef, err } } @@ -251,7 +251,7 @@ func toZoneNetworkEndpointMap(eds []negtypes.EndpointsData, zoneGetter negtypes. klog.V(2).Infof("Endpoint %q in Endpoints %s/%s does not have a Pod as the TargetRef object. Skipping", endpointAddress.Addresses, ed.Meta.Namespace, ed.Meta.Name) continue } - // Skip if the endpoint's pod not matching the subset lables. + // Skip if the endpoint's pod does not match the subset labels. if !shouldPodBeInDestinationRuleSubset(podLister, endpointAddress.TargetRef.Namespace, endpointAddress.TargetRef.Name, subsetLabels) { continue } @@ -400,7 +400,7 @@ func shouldPodBeInNeg(podLister cache.Indexer, namespace, name string) bool { return true } -// shouldPodBeInDestinationRuleSubset return ture if pod match the DestinationRule subset lables. +// shouldPodBeInDestinationRuleSubset returns true if the pod matches the DestinationRule subset labels. func shouldPodBeInDestinationRuleSubset(podLister cache.Indexer, namespace, name string, subsetLabels string) bool { if podLister == nil { return false diff --git a/pkg/neg/syncers/utils_test.go b/pkg/neg/syncers/utils_test.go index 0c9b62bdeb..8a4e21078b 100644 --- a/pkg/neg/syncers/utils_test.go +++ b/pkg/neg/syncers/utils_test.go @@ -568,7 +568,7 @@ func TestToZoneNetworkEndpointMapUtil(t *testing.T) { func TestRetrieveExistingZoneNetworkEndpointMap(t *testing.T) { zoneGetter := negtypes.NewFakeZoneGetter() - negCloud := negtypes.NewFakeNetworkEndpointGroupCloud("test-subnetwork", "test-newtork") + negCloud := negtypes.NewFakeNetworkEndpointGroupCloud("test-subnetwork", "test-network") negName := "test-neg-name" irrelevantNegName := "irrelevant" testIP1 := "1.2.3.4" diff --git a/pkg/neg/types/cloudprovideradapter.go b/pkg/neg/types/cloudprovideradapter.go index d80762028a..4503c2999b 100644 --- a/pkg/neg/types/cloudprovideradapter.go +++ b/pkg/neg/types/cloudprovideradapter.go @@ -54,7 +54,7 @@ type cloudProviderAdapter struct { subnetworkURL string } -// GetNetworkEndpointGroup inmplements NetworkEndpointGroupCloud. +// GetNetworkEndpointGroup implements NetworkEndpointGroupCloud.
func (a *cloudProviderAdapter) GetNetworkEndpointGroup(name string, zone string, version meta.Version) (*composite.NetworkEndpointGroup, error) { return composite.GetNetworkEndpointGroup(a.c, meta.ZonalKey(name, zone), version) diff --git a/pkg/neg/types/interfaces.go b/pkg/neg/types/interfaces.go index e34837730f..7f731207b2 100644 --- a/pkg/neg/types/interfaces.go +++ b/pkg/neg/types/interfaces.go @@ -67,7 +67,7 @@ type NegSyncer interface { type NegSyncerManager interface { // EnsureSyncer ensures corresponding syncers are started and stops any unnecessary syncer // portMap is a map of ServicePort Port to TargetPort. Returns counts of successful Neg syncers - // and failed Neg sycner creations + // and failed Neg syncer creations EnsureSyncers(namespace, name string, portMap PortInfoMap) (int, int, error) // StopSyncer stops all syncers related to the service. This call is asynchronous. It will not wait for all syncers to stop. StopSyncer(namespace, name string) diff --git a/pkg/neg/types/types.go b/pkg/neg/types/types.go index 91f0a18ccf..cc4418739c 100644 --- a/pkg/neg/types/types.go +++ b/pkg/neg/types/types.go @@ -127,7 +127,7 @@ type PortInfo struct { NegName string // ReadinessGate indicates if the NEG associated with the port has NEG readiness gate enabled // This is enabled with service port is reference by ingress. - // If the service port is only exposed as stand alone NEG, it should not be enbled. + // If the service port is only exposed as stand alone NEG, it should not be enabled. ReadinessGate bool // EpCalculatorMode indicates if the endpoints for the NEG associated with this port need to // be selected at random(L4ClusterMode), or by following service endpoints(L4LocalMode). @@ -188,7 +188,7 @@ func NewPortInfoMapForVMIPNEG(namespace, name string, namer namer.L4ResourcesNam return ret } -// NewPortInfoMapWithDestinationRule create PortInfoMap based on a gaven DesinationRule. +// NewPortInfoMapWithDestinationRule creates a PortInfoMap based on a given DestinationRule. // Return error message if the DestinationRule contains duplicated subsets.
func NewPortInfoMapWithDestinationRule(namespace, name string, svcPortTupleSet SvcPortTupleSet, namer NetworkEndpointGroupNamer, readinessGate bool, destinationRule *istioV1alpha3.DestinationRule) (PortInfoMap, error) { diff --git a/pkg/neg/types/types_test.go b/pkg/neg/types/types_test.go index 2824984127..1ed496d0b2 100644 --- a/pkg/neg/types/types_test.go +++ b/pkg/neg/types/types_test.go @@ -612,7 +612,7 @@ func TestEndpointsDataFromEndpoints(t *testing.T) { ValidateAddressDataForEndpointsAddresses(endpointsData[i].Addresses, subset.Addresses, true, t) ValidateAddressDataForEndpointsAddresses(endpointsData[i].Addresses, subset.NotReadyAddresses, false, t) if len(endpointsData[i].Addresses) != len(subset.Addresses)+len(subset.NotReadyAddresses) { - t.Errorf("Unexpected len of endpointsData adresses, got %d, expected %d", len(endpointsData[i].Addresses), len(subset.Addresses)+len(subset.NotReadyAddresses)) + t.Errorf("Unexpected len of endpointsData addresses, got %d, expected %d", len(endpointsData[i].Addresses), len(subset.Addresses)+len(subset.NotReadyAddresses)) } } } @@ -758,7 +758,7 @@ func TestEndpointsDataFromEndpointSlices(t *testing.T) { } } if len(endpointsData[i].Addresses) != len(slice.Endpoints)-terminatingEndpointsNumber { - t.Errorf("Unexpected len of endpointsData adresses, got %d, expected %d", len(endpointsData[i].Addresses), len(slice.Endpoints)-1) + t.Errorf("Unexpected len of endpointsData addresses, got %d, expected %d", len(endpointsData[i].Addresses), len(slice.Endpoints)-1) } } } diff --git a/pkg/neg/utils.go b/pkg/neg/utils.go index b1b0c30d07..2448867e5d 100644 --- a/pkg/neg/utils.go +++ b/pkg/neg/utils.go @@ -28,7 +28,7 @@ import ( "k8s.io/ingress-gce/pkg/neg/types" ) -// NegSyncerType represents the the neg syncer type +// NegSyncerType represents the neg syncer type type NegSyncerType string // negServicePorts returns the SvcPortTupleSet that matches the exposed service port in the NEG annotation. @@ -55,7 +55,7 @@ func negServicePorts(ann *annotations.NegAnnotation, knownSvcTupleSet types.SvcP } // castToDestinationRule cast Unstructured obj to istioV1alpha3.DestinationRule -// Return targetServiceNamespace, targetSeriveName(DestinationRule.Host), DestionationRule and error. +// Return targetServiceNamespace, targetServiceName(DestinationRule.Host), DestinationRule and error. func castToDestinationRule(drus *unstructured.Unstructured) (string, string, *istioV1alpha3.DestinationRule, error) { drJSON, err := json.Marshal(drus.Object["spec"]) if err != nil { diff --git a/pkg/psc/controller.go b/pkg/psc/controller.go index 511026af14..3185ad835e 100644 --- a/pkg/psc/controller.go +++ b/pkg/psc/controller.go @@ -413,7 +413,7 @@ func (c *Controller) garbageCollectServiceAttachments() { } } -// deleteServiceAttachment attemps to delete the GCE Service Attachment resource +// deleteServiceAttachment attempts to delete the GCE Service Attachment resource // that corresponds to the provided CR. If successful, the finalizer on the CR // will be removed. 
func (c *Controller) deleteServiceAttachment(sa *sav1.ServiceAttachment) { @@ -621,7 +621,7 @@ func (c *Controller) ensureSAFinalizerRemoved(cr *sav1.ServiceAttachment) error // for a K8s Service func validateResourceReference(ref v1.TypedLocalObjectReference) error { if ref.APIGroup != nil && *ref.APIGroup != "" { - return fmt.Errorf("invalid resource reference: %s, apiGroup must be emptry or nil", *ref.APIGroup) + return fmt.Errorf("invalid resource reference: %s, apiGroup must be empty or nil", *ref.APIGroup) } if strings.ToLower(ref.Kind) != svcKind { @@ -638,7 +638,7 @@ func needsUpdate(existingSA, desiredSA *ga.ServiceAttachment) (bool, error) { // be the same as the one that eventually gets stored on the GCE object. For example // the controller takes the forwarding rule from the GA FR resource, however if the GCE // SA uses the Beta FR resource the self links will be different though the resource is - // the same. The same is true for the subnets. Due to this discrepency the GCE SA cannot + // the same. The same is true for the subnets. Due to this discrepancy the GCE SA cannot // be compared with a reflect.DeepEqual. // The TargetService on the GCE Service Attachment is the self link to the URL of the producer diff --git a/pkg/psc/controller_test.go b/pkg/psc/controller_test.go index 249c9273d3..c6269b595e 100644 --- a/pkg/psc/controller_test.go +++ b/pkg/psc/controller_test.go @@ -353,7 +353,7 @@ func TestServiceAttachmentConsumers(t *testing.T) { saName := "my-sa" svcName := "my-service" - saUID := "serivce-attachment-uid" + saUID := "service-attachment-uid" frIPAddr := "1.2.3.4" controller := newTestController("ZONAL") gceSAName := controller.saNamer.ServiceAttachment(testNamespace, saName, saUID) @@ -435,7 +435,7 @@ func TestServiceAttachmentUpdate(t *testing.T) { saName := "my-sa" svcName := "my-service" otherServiceName := "my-other-service" - saUID := "serivce-attachment-uid" + saUID := "service-attachment-uid" frIPAddr := "1.2.3.4" subnet1 := "subnet-1" @@ -683,7 +683,7 @@ func TestNeedsUpdate(t *testing.T) { func TestServiceAttachmentGarbageCollection(t *testing.T) { svcNamePrefix := "my-service" - saUIDPrefix := "serivce-attachment-uid" + saUIDPrefix := "service-attachment-uid" frIPAddr := "1.2.3.4" testcases := []struct { @@ -778,7 +778,7 @@ func TestServiceAttachmentGarbageCollection(t *testing.T) { t.Fatalf("failed to update service attachment to client: %q", err) } - // sync the controller cache to have have current set of serviceAttachments + // sync the controller cache to have current set of serviceAttachments syncServiceAttachmentLister(controller) if tc.getError != nil || tc.deleteError != nil { @@ -1271,7 +1271,7 @@ func validateSAStatus(status sav1.ServiceAttachmentStatus, sa *ga.ServiceAttachm return nil } -// verifyServiceAttachmentFinalizer verfies that the provided ServiceAttachment CR +// verifyServiceAttachmentFinalizer verifies that the provided ServiceAttachment CR // has the ServiceAttachmentFinalizerKey, otherwise it will return an error func verifyServiceAttachmentFinalizer(cr *sav1.ServiceAttachment) error { finalizers := cr.GetFinalizers() @@ -1302,7 +1302,7 @@ func syncServiceAttachmentLister(controller *Controller) error { return nil } -// verifyServiceAttachmentCRDeletion will verify that the provicded ServiceAttachment CR +// verifyServiceAttachmentCRDeletion will verify that the provided ServiceAttachment CR // does not have the service attachment finalizer and that the deletion timestamp has been // set func 
verifyServiceAttachmentCRDeletion(controller *Controller, sa *sav1.ServiceAttachment) error { @@ -1325,7 +1325,7 @@ func verifyServiceAttachmentCRDeletion(controller *Controller, sa *sav1.ServiceA } // verifyGCEServiceAttachmentDeletion verifies that the provided CR's corresponding GCE -// Service Attachmen resource has been deleted +// Service Attachment resource has been deleted func verifyGCEServiceAttachmentDeletion(controller *Controller, sa *sav1.ServiceAttachment) error { gceSAName := controller.saNamer.ServiceAttachment(sa.Namespace, sa.Name, string(sa.UID)) gceSA, err := getServiceAttachment(controller.cloud, gceSAName) diff --git a/pkg/psc/metrics/types.go b/pkg/psc/metrics/types.go index 0407b99e62..f0fc1bf37d 100644 --- a/pkg/psc/metrics/types.go +++ b/pkg/psc/metrics/types.go @@ -16,7 +16,7 @@ limitations under the License. package metrics -// PSCState tracks service attachment and psc feautre usage +// PSCState tracks service attachment and psc feature usage type PSCState struct { // InSuccess specifies if the ServiceAttachment was successfully created InSuccess bool diff --git a/pkg/ratelimit/ratelimit.go b/pkg/ratelimit/ratelimit.go index ec37d81d21..95e6cab7cf 100644 --- a/pkg/ratelimit/ratelimit.go +++ b/pkg/ratelimit/ratelimit.go @@ -106,7 +106,7 @@ func (grl *GCERateLimiter) Accept(ctx context.Context, key *cloud.RateLimitKey) // associated with the passed in key. func (grl *GCERateLimiter) rateLimitImpl(key *cloud.RateLimitKey) flowcontrol.RateLimiter { // Since the passed in key will have the ProjectID field filled in, we need to - // create a copy which does not, so that retreiving the rate limiter implementation + // create a copy which does not, so that retrieving the rate limiter implementation // through the map works as expected. keyCopy := cloud.RateLimitKey{ ProjectID: "", diff --git a/pkg/storage/configmaps.go b/pkg/storage/configmaps.go index a963e468c5..0a9cb98ec5 100644 --- a/pkg/storage/configmaps.go +++ b/pkg/storage/configmaps.go @@ -182,7 +182,7 @@ func (a *APIServerconfigMapStore) Delete(obj interface{}) error { func (a *APIServerconfigMapStore) GetByKey(key string) (item interface{}, exists bool, err error) { nsName := strings.Split(key, "/") if len(nsName) != 2 { - return nil, false, fmt.Errorf("failed to get key %v, unexpecte format, expecting ns/name", key) + return nil, false, fmt.Errorf("failed to get key %v, unexpected format, expecting ns/name", key) } ns, name := nsName[0], nsName[1] cfg, err := a.client.CoreV1().ConfigMaps(ns).Get(context.TODO(), name, metav1.GetOptions{}) diff --git a/pkg/storage/configmaps_test.go b/pkg/storage/configmaps_test.go index 6a072cd8e9..e29550530a 100644 --- a/pkg/storage/configmaps_test.go +++ b/pkg/storage/configmaps_test.go @@ -65,7 +65,7 @@ func TestCreateOnlyConfigMap(t *testing.T) { } } -func TestFakeConfigMapVaule(t *testing.T) { +func TestFakeConfigMapValue(t *testing.T) { vault := NewFakeConfigMapVault(api.NamespaceSystem, "ingress-uid") // Get value from an empty vault. val, exists, err := vault.Get(UIDDataKey) diff --git a/pkg/storage/doc.go b/pkg/storage/doc.go index 492d30a49b..9d32a382ba 100644 --- a/pkg/storage/doc.go +++ b/pkg/storage/doc.go @@ -19,7 +19,7 @@ limitations under the License. // 1. There is only so much information we can pack into 64 chars allowed // by GCE for resource names. // 2. 
An Ingress controller cannot assume total control over a project, in -// fact in a majority of cases (ubernetes, tests, multiple gke clusters in +// fact in a majority of cases (kubernetes, tests, multiple gke clusters in // same project) there *will* be multiple controllers in a project. // 3. If the Ingress controller pod is killed, an Ingress is deleted while // the pod is down, and then the controller is re-scheduled on another node, diff --git a/pkg/translator/healthchecks.go b/pkg/translator/healthchecks.go index 1d8a522540..d2bb32b837 100644 --- a/pkg/translator/healthchecks.go +++ b/pkg/translator/healthchecks.go @@ -77,7 +77,7 @@ type HealthCheck struct { ForNEG bool ForILB bool - // As the {HTTP, HTTPS, HTTP2} settings are identical, we mantain the + // As the {HTTP, HTTPS, HTTP2} settings are identical, we maintain the // settings at the outer-level and copy into the appropriate struct // in the HealthCheck embedded struct (see `merge()`) when getting the // compute struct back. diff --git a/pkg/translator/translator.go b/pkg/translator/translator.go index c8a8efc9c3..c0ff81ce47 100644 --- a/pkg/translator/translator.go +++ b/pkg/translator/translator.go @@ -178,7 +178,7 @@ const hostRulePrefix = "host" // deleted, we need to find all host PathMatchers that have the backend // and remove the mapping. When a new path is added to a host (happens // more frequently than service deletion) we just need to lookup the 1 -// pathmatcher of the host. +// path matcher of the host. func ToCompositeURLMap(g *utils.GCEURLMap, namer namer.IngressFrontendNamer, key *meta.Key) *composite.UrlMap { defaultBackendName := g.DefaultBackend.BackendName() key.Name = defaultBackendName diff --git a/pkg/translator/translator_test.go b/pkg/translator/translator_test.go index 36bcc50894..8ef84c89a9 100644 --- a/pkg/translator/translator_test.go +++ b/pkg/translator/translator_test.go @@ -147,7 +147,7 @@ func TestToRedirectUrlMap(t *testing.T) { expect: nil, }, { - desc: "Disabled with with no response code set", + desc: "Disabled with no response code set", fc: &frontendconfigv1beta1.FrontendConfig{Spec: frontendconfigv1beta1.FrontendConfigSpec{RedirectToHttps: &frontendconfigv1beta1.HttpsRedirectConfig{Enabled: false}}}, expect: nil, }, diff --git a/pkg/utils/namer/namer.go b/pkg/utils/namer/namer.go index 6a2d2ff9f6..883aad1ebd 100644 --- a/pkg/utils/namer/namer.go +++ b/pkg/utils/namer/namer.go @@ -81,7 +81,7 @@ const ( maxNEGDescriptiveLabel = 38 // maxNEGDescriptiveLabelASM is the max length for namespace, name, - // port and DestinationRule subset for neg name. It use one more hypen + // port and DestinationRule subset for neg name. It uses one more hyphen // connector compared to maxNEGDescriptiveLabel maxNEGDescriptiveLabelASM = maxNEGDescriptiveLabel - 1 @@ -204,7 +204,7 @@ func (n *Namer) Firewall() string { func truncate(key string) string { if len(key) > nameLenLimit { // GCE requires names to end with an alphanumeric, but allows - // characters like '-', so make sure the trucated name ends + // characters like '-', so make sure the truncated name ends // legally. return fmt.Sprintf("%v%v", key[:nameLenLimit], alphaNumericChar) } @@ -344,7 +344,7 @@ func (n *Namer) FirewallRule() string { func (n *Namer) LoadBalancer(key string) LoadBalancerName { // TODO: Pipe the clusterName through, for now it saves code churn // to just grab it globally, especially since we haven't decided how - // to handle namespace conflicts in the Ubernetes context.
+ // to handle namespace conflicts in the Kubernetes context. parts := strings.Split(key, clusterNameDelimiter) scrubbedName := strings.Replace(key, "/", "-", -1) clusterName := n.UID() diff --git a/pkg/utils/namer/serviceattachmentnamer.go b/pkg/utils/namer/serviceattachmentnamer.go index e490297301..2773cdf57d 100644 --- a/pkg/utils/namer/serviceattachmentnamer.go +++ b/pkg/utils/namer/serviceattachmentnamer.go @@ -24,7 +24,7 @@ const ( // maxSADescriptiveLabel is the max length for prefix, namespace, and name for // service attachment. 63 - 1 (naming schema version prefix) // - 2 (service attachment identifier prefix) - 8 (truncated kube system id) - 8 (suffix hash) - // - 5 (hyphn connectors) = 39 + // - 5 (hyphen connectors) = 39 maxSADescriptiveLabel = 39 // serviceAttachmentPrefix is the prefix used in service attachment naming scheme diff --git a/pkg/utils/slice/slice.go b/pkg/utils/slice/slice.go index fdf3a5bed6..857b5144d1 100644 --- a/pkg/utils/slice/slice.go +++ b/pkg/utils/slice/slice.go @@ -17,7 +17,7 @@ limitations under the License. package slice // ContainsString checks if a given slice of strings contains the provided string. -// If a modifier func is provided, it is called with the slice item before the comparation. +// If a modifier func is provided, it is called with the slice item before the comparison. func ContainsString(slice []string, s string, modifier func(s string) string) bool { for _, item := range slice { if item == s { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 674882647e..3cdc2e63d2 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -61,7 +61,7 @@ const ( Create // Update used to record updates in a sync pool. Update - // Delete used to record deltions from a sync pool. + // Delete used to record deletions from a sync pool. Delete // AddInstances used to record a call to AddInstances. AddInstances @@ -409,7 +409,7 @@ var ( CandidateNodesPredicate = func(node *api_v1.Node) bool { return nodePredicateInternal(node, false, false) } - // CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes selects all nodes except ones that are upgradind and/or have any exclude labels. This function tolerates unready nodes. + // CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes selects all nodes except ones that are upgrading and/or have any exclude labels. This function tolerates unready nodes. // TODO(prameshj) - Once the kubernetes/kubernetes Predicate function includes Unready nodes and the GKE nodepool code sets exclude labels on upgrade, this can be replaced with CandidateNodesPredicate. CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes = func(node *api_v1.Node) bool { return nodePredicateInternal(node, true, true) @@ -718,7 +718,7 @@ func LegacyForwardingRuleName(svc *api_v1.Service) string { } // L4LBResourceDescription stores the description fields for L4 ILB or NetLB resources. -// This is useful to indetify which resources correspond to which L4 LB service. +// This is useful to identify which resources correspond to which L4 LB service. type L4LBResourceDescription struct { // ServiceName indicates the name of the service the resource is for. 
ServiceName string `json:"networking.gke.io/service-name"` diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go index 939f4efb95..d5474f319b 100644 --- a/pkg/utils/utils_test.go +++ b/pkg/utils/utils_test.go @@ -529,7 +529,7 @@ func TestGetNodeConditionPredicate(t *testing.T) { }, expectAccept: false, expectAcceptByUnreadyNodePredicate: true, - name: "uneady node", + name: "unready node", }, { node: api_v1.Node{