diff --git a/cmd/glbc/main.go b/cmd/glbc/main.go index a84dc2e0f8..90004bab8c 100644 --- a/cmd/glbc/main.go +++ b/cmd/glbc/main.go @@ -26,7 +26,7 @@ import ( flag "github.com/spf13/pflag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/ingress-gce/pkg/frontendconfig" - "k8s.io/ingress-gce/pkg/healthchecks" + "k8s.io/ingress-gce/pkg/healthchecks_l4" "k8s.io/ingress-gce/pkg/ingparams" "k8s.io/ingress-gce/pkg/l4lb" "k8s.io/ingress-gce/pkg/psc" @@ -276,7 +276,7 @@ func runControllers(ctx *ingctx.ControllerContext) { fwc := firewalls.NewFirewallController(ctx, flags.F.NodePortRanges.Values()) - healthchecks.InitializeL4(ctx.Cloud, ctx) + healthchecks_l4.Initialize(ctx.Cloud, ctx) if flags.F.RunL4Controller { l4Controller := l4lb.NewILBController(ctx, stopCh) diff --git a/pkg/healthchecks/interfaces.go b/pkg/healthchecks/interfaces.go index 0b9956be13..60cb8d9f79 100644 --- a/pkg/healthchecks/interfaces.go +++ b/pkg/healthchecks/interfaces.go @@ -17,15 +17,13 @@ limitations under the License. package healthchecks import ( + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" - compute "google.golang.org/api/compute/v1" + "google.golang.org/api/compute/v1" v1 "k8s.io/api/core/v1" "k8s.io/ingress-gce/pkg/translator" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/ingress-gce/pkg/utils/namer" - - "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" ) // HealthCheckProvider is an interface to manage a single GCE health check. @@ -58,19 +56,3 @@ type HealthChecker interface { Delete(name string, scope meta.KeyType) error Get(name string, version meta.Version, scope meta.KeyType) (*translator.HealthCheck, error) } - -// L4HealthChecks defines methods for creating and deleting health checks (and their firewall rules) for l4 services -type L4HealthChecks interface { - // EnsureL4HealthCheck creates health check (and firewall rule) for l4 service - EnsureL4HealthCheck(svc *v1.Service, namer namer.L4ResourcesNamer, sharedHC bool, scope meta.KeyType, l4Type utils.L4LBType, nodeNames []string) *EnsureL4HealthCheckResult - // DeleteHealthCheck deletes health check (and firewall rule) for l4 service - DeleteHealthCheck(svc *v1.Service, namer namer.L4ResourcesNamer, sharedHC bool, scope meta.KeyType, l4Type utils.L4LBType) (string, error) -} - -type EnsureL4HealthCheckResult struct { - HCName string - HCLink string - HCFirewallRuleName string - GceResourceInError string - Err error -} diff --git a/pkg/healthchecks/healthchecks_l4.go b/pkg/healthchecks_l4/healthchecks_l4.go similarity index 86% rename from pkg/healthchecks/healthchecks_l4.go rename to pkg/healthchecks_l4/healthchecks_l4.go index 6d7fc1effc..94c869d468 100644 --- a/pkg/healthchecks/healthchecks_l4.go +++ b/pkg/healthchecks_l4/healthchecks_l4.go @@ -14,14 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package healthchecks +package healthchecks_l4 import ( "fmt" "strconv" "sync" - cloudprovider "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -49,20 +48,21 @@ const ( var ( // instanceLock to prevent duplicate initialization. 
instanceLock = &sync.Mutex{} - // instance is a singleton instance, created by InitializeL4 + // instance is a singleton instance, created by Initialize instance *l4HealthChecks ) type l4HealthChecks struct { // sharedResourceLock serializes operations on the healthcheck and firewall // resources shared across multiple Services. + hcProvider HealthChecksProvider sharedResourcesLock sync.Mutex cloud *gce.Cloud recorderFactory events.RecorderProducer } -// InitializeL4 creates singleton instance, must be run before L4() func -func InitializeL4(cloud *gce.Cloud, recorderFactory events.RecorderProducer) { +// Initialize creates singleton instance, must be run before GetInstance() func +func Initialize(cloud *gce.Cloud, recorderFactory events.RecorderProducer) { instanceLock.Lock() defer instanceLock.Unlock() @@ -74,21 +74,23 @@ func InitializeL4(cloud *gce.Cloud, recorderFactory events.RecorderProducer) { instance = &l4HealthChecks{ cloud: cloud, recorderFactory: recorderFactory, + hcProvider: NewHealthChecks(cloud, meta.VersionGA), } klog.V(3).Infof("Initialized L4 Healthchecks") } -// FakeL4 creates instance of l4HealthChecks> USe for test only. -func FakeL4(cloud *gce.Cloud, recorderFactory events.RecorderProducer) *l4HealthChecks { +// Fake creates instance of l4HealthChecks. Use for test only. +func Fake(cloud *gce.Cloud, recorderFactory events.RecorderProducer) *l4HealthChecks { instance = &l4HealthChecks{ cloud: cloud, recorderFactory: recorderFactory, + hcProvider: NewHealthChecks(cloud, meta.VersionGA), } return instance } -// L4 returns singleton instance, must be run after InitializeL4 -func L4() *l4HealthChecks { +// GetInstance returns singleton instance, must be run after Initialize +func GetInstance() *l4HealthChecks { return instance } @@ -153,7 +155,7 @@ func (l4hc *l4HealthChecks) DeleteHealthCheck(svc *corev1.Service, namer namer.L defer l4hc.sharedResourcesLock.Unlock() } - err := utils.IgnoreHTTPNotFound(l4hc.deleteHealthCheck(hcName, scope)) + err := l4hc.hcProvider.Delete(hcName, scope) if err != nil { // Ignore deletion error due to health check in use by another resource. if !utils.IsInUsedByError(err) { @@ -168,17 +170,11 @@ func (l4hc *l4HealthChecks) DeleteHealthCheck(svc *corev1.Service, namer namer.L } func (l4hc *l4HealthChecks) ensureL4HealthCheckInternal(hcName string, svcName types.NamespacedName, shared bool, path string, port int32, scope meta.KeyType, l4Type utils.L4LBType) (*composite.HealthCheck, string, error) { - selfLink := "" - key, err := composite.CreateKey(l4hc.cloud, hcName, scope) + hc, err := l4hc.hcProvider.Get(hcName, scope) if err != nil { - return nil, selfLink, fmt.Errorf("Failed to create key for healthcheck with name %s for service %s", hcName, svcName.String()) - } - hc, err := composite.GetHealthCheck(l4hc.cloud, key, meta.VersionGA) - if err != nil { - if !utils.IsNotFoundError(err) { - return nil, selfLink, err - } + return nil, "", err } + var region string if scope == meta.Regional { region = l4hc.cloud.Region() @@ -188,14 +184,17 @@ func (l4hc *l4HealthChecks) ensureL4HealthCheckInternal(hcName string, svcName t if hc == nil { // Create the healthcheck klog.V(2).Infof("Creating healthcheck %s for service %s, shared = %v. 
Expected healthcheck: %v", hcName, svcName, shared, expectedHC) - err = composite.CreateHealthCheck(l4hc.cloud, key, expectedHC) + err = l4hc.hcProvider.Create(expectedHC) + if err != nil { + return nil, "", err + } + selfLink, err := l4hc.hcProvider.SelfLink(expectedHC.Name, scope) if err != nil { - return nil, selfLink, err + return nil, "", err } - selfLink = cloudprovider.SelfLink(meta.VersionGA, l4hc.cloud.ProjectID(), "healthChecks", key) return expectedHC, selfLink, nil } - selfLink = hc.SelfLink + selfLink := hc.SelfLink if !needToUpdateHealthChecks(hc, expectedHC) { // nothing to do klog.V(3).Infof("Healthcheck %v already exists", hcName) @@ -203,7 +202,7 @@ func (l4hc *l4HealthChecks) ensureL4HealthCheckInternal(hcName string, svcName t } mergeHealthChecks(hc, expectedHC) klog.V(2).Infof("Updating healthcheck %s for service %s, updated healthcheck: %v", hcName, svcName, expectedHC) - err = composite.UpdateHealthCheck(l4hc.cloud, key, expectedHC) + err = l4hc.hcProvider.Update(expectedHC.Name, scope, expectedHC) if err != nil { return nil, selfLink, err } @@ -226,14 +225,6 @@ func (l4hc *l4HealthChecks) ensureFirewall(svc *corev1.Service, hcFwName string, return firewalls.EnsureL4LBFirewallForHc(svc, sharedHC, &hcFWRParams, l4hc.cloud, l4hc.recorderFactory.Recorder(svc.Namespace)) } -func (l4hc *l4HealthChecks) deleteHealthCheck(name string, scope meta.KeyType) error { - key, err := composite.CreateKey(l4hc.cloud, name, scope) - if err != nil { - return fmt.Errorf("Failed to create composite key for healthcheck %s - %w", name, err) - } - return composite.DeleteHealthCheck(l4hc.cloud, key, meta.VersionGA) -} - func (l4hc *l4HealthChecks) deleteHealthCheckFirewall(svc *corev1.Service, hcName, hcFwName string, sharedHC bool, l4Type utils.L4LBType) (string, error) { namespacedName := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace} @@ -265,12 +256,12 @@ func (l4hc *l4HealthChecks) healthCheckFirewallSafeToDelete(hcName string, share if l4Type == utils.XLB { scopeToCheck = meta.Global } - key, err := composite.CreateKey(l4hc.cloud, hcName, scopeToCheck) + + hc, err := l4hc.hcProvider.Get(hcName, scopeToCheck) if err != nil { - return false, fmt.Errorf("Failed to create composite key for healthcheck %s - %w", hcName, err) + return false, fmt.Errorf("l4hc.hcProvider.Get(%s, %s) returned error %w, want nil", hcName, scopeToCheck, err) } - _, err = composite.GetHealthCheck(l4hc.cloud, key, meta.VersionGA) - return utils.IsNotFoundError(err), nil + return hc == nil, nil } func (l4hc *l4HealthChecks) deleteFirewall(name string, svc *corev1.Service) error { diff --git a/pkg/healthchecks/healthchecks_l4_test.go b/pkg/healthchecks_l4/healthchecks_l4_test.go similarity index 99% rename from pkg/healthchecks/healthchecks_l4_test.go rename to pkg/healthchecks_l4/healthchecks_l4_test.go index f0fccb53e1..d5e3c6a177 100644 --- a/pkg/healthchecks/healthchecks_l4_test.go +++ b/pkg/healthchecks_l4/healthchecks_l4_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package healthchecks +package healthchecks_l4 import ( "testing" @@ -109,7 +109,7 @@ func TestCompareHealthChecks(t *testing.T) { } } -func TestCreateHealthCheck(t *testing.T) { +func TestNewHealthCheck(t *testing.T) { t.Parallel() namespaceName := types.NamespacedName{Name: "svc", Namespace: "default"} diff --git a/pkg/healthchecks_l4/interfaces.go b/pkg/healthchecks_l4/interfaces.go new file mode 100644 index 0000000000..ed0cdf21d1 --- /dev/null +++ b/pkg/healthchecks_l4/interfaces.go @@ -0,0 +1,33 @@ +package healthchecks_l4 + +import ( + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + v1 "k8s.io/api/core/v1" + "k8s.io/ingress-gce/pkg/composite" + "k8s.io/ingress-gce/pkg/utils" + "k8s.io/ingress-gce/pkg/utils/namer" +) + +// L4HealthChecks defines methods for creating and deleting health checks (and their firewall rules) for l4 services +type L4HealthChecks interface { + // EnsureL4HealthCheck creates health check (and firewall rule) for l4 service + EnsureL4HealthCheck(svc *v1.Service, namer namer.L4ResourcesNamer, sharedHC bool, scope meta.KeyType, l4Type utils.L4LBType, nodeNames []string) *EnsureL4HealthCheckResult + // DeleteHealthCheck deletes health check (and firewall rule) for l4 service + DeleteHealthCheck(svc *v1.Service, namer namer.L4ResourcesNamer, sharedHC bool, scope meta.KeyType, l4Type utils.L4LBType) (string, error) +} + +type EnsureL4HealthCheckResult struct { + HCName string + HCLink string + HCFirewallRuleName string + GceResourceInError string + Err error +} + +type HealthChecksProvider interface { + Get(name string, scope meta.KeyType) (*composite.HealthCheck, error) + Create(healthCheck *composite.HealthCheck) error + Update(name string, scope meta.KeyType, updatedHealthCheck *composite.HealthCheck) error + Delete(name string, scope meta.KeyType) error + SelfLink(name string, scope meta.KeyType) (string, error) +} diff --git a/pkg/healthchecks_l4/provider.go b/pkg/healthchecks_l4/provider.go new file mode 100644 index 0000000000..15d0441ebe --- /dev/null +++ b/pkg/healthchecks_l4/provider.go @@ -0,0 +1,86 @@ +package healthchecks_l4 + +import ( + "fmt" + + cloudprovider "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "k8s.io/ingress-gce/pkg/composite" + "k8s.io/ingress-gce/pkg/utils" + "k8s.io/legacy-cloud-providers/gce" +) + +type HealthChecks struct { + cloud *gce.Cloud + version meta.Version +} + +func NewHealthChecks(cloud *gce.Cloud, version meta.Version) *HealthChecks { + return &HealthChecks{ + cloud: cloud, + version: version, + } +} + +func (hc *HealthChecks) createKey(name string, scope meta.KeyType) (*meta.Key, error) { + return composite.CreateKey(hc.cloud, name, scope) +} + +func (hc *HealthChecks) Get(name string, scope meta.KeyType) (*composite.HealthCheck, error) { + key, err := hc.createKey(name, scope) + if err != nil { + return nil, fmt.Errorf("hc.createKey(%s, %s) returned error %w, want nil", name, scope, err) + } + healthCheck, err := composite.GetHealthCheck(hc.cloud, key, hc.version) + if err != nil { + if utils.IsNotFoundError(err) { + return nil, nil + } + return nil, fmt.Errorf("composite.GetHealthCheck(_, %v, %v) returned error %w, want nil", key, meta.VersionGA, err) + } + return healthCheck, nil +} + +func (hc *HealthChecks) Create(healthCheck *composite.HealthCheck) error { + key, err := hc.createKey(healthCheck.Name, healthCheck.Scope) + if err != nil { + return fmt.Errorf("hc.createKey(%s, %s) returned error: %w, want 
nil", healthCheck.Name, healthCheck.Scope, err) + } + + err = composite.CreateHealthCheck(hc.cloud, key, healthCheck) + if err != nil { + return fmt.Errorf("composite.CreateHealthCheck(_, %s, %v) returned error %w, want nil", key, healthCheck, err) + } + return nil +} + +func (hc *HealthChecks) Update(name string, scope meta.KeyType, updatedHealthCheck *composite.HealthCheck) error { + key, err := hc.createKey(name, scope) + if err != nil { + return fmt.Errorf("hc.createKey(%s, %s) returned error: %w, want nil", name, scope, err) + } + + err = composite.UpdateHealthCheck(hc.cloud, key, updatedHealthCheck) + if err != nil { + return fmt.Errorf("composite.UpdateHealthCheck(_, %s, %v) returned error %w, want nil", key, updatedHealthCheck, err) + } + return nil +} + +func (hc *HealthChecks) Delete(name string, scope meta.KeyType) error { + key, err := hc.createKey(name, scope) + if err != nil { + return fmt.Errorf("hc.createKey(%s, %s) returned error %w, want nil", name, scope, err) + } + + return utils.IgnoreHTTPNotFound(composite.DeleteHealthCheck(hc.cloud, key, hc.version)) +} + +func (hc *HealthChecks) SelfLink(name string, scope meta.KeyType) (string, error) { + key, err := hc.createKey(name, scope) + if err != nil { + return "", fmt.Errorf("hc.createKey(%s, %s) returned error %w, want nil", name, scope, err) + } + + return cloudprovider.SelfLink(meta.VersionGA, hc.cloud.ProjectID(), "healthChecks", key), nil +} diff --git a/pkg/healthchecks_l4/provider_test.go b/pkg/healthchecks_l4/provider_test.go new file mode 100644 index 0000000000..6263699aa3 --- /dev/null +++ b/pkg/healthchecks_l4/provider_test.go @@ -0,0 +1,256 @@ +package healthchecks_l4 + +import ( + "testing" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/ingress-gce/pkg/composite" + "k8s.io/ingress-gce/pkg/utils" + "k8s.io/legacy-cloud-providers/gce" +) + +func TestCreateHealthCheck(t *testing.T) { + testCases := []struct { + healthCheck *composite.HealthCheck + desc string + }{ + { + healthCheck: &composite.HealthCheck{ + Name: "regional-hc", + Scope: meta.Regional, + }, + desc: "Test creating regional health check", + }, + { + healthCheck: &composite.HealthCheck{ + Name: "global-hc", + Scope: meta.Global, + }, + desc: "Test creating glbal health check", + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) + hc := NewHealthChecks(fakeGCE, meta.VersionGA) + + err := hc.Create(tc.healthCheck) + if err != nil { + t.Fatalf("hc.Create(%v), returned error %v, want nil", tc.healthCheck, err) + } + + verifyHealthCheckExists(t, fakeGCE, tc.healthCheck.Name, tc.healthCheck.Scope) + }) + } +} + +func TestGetHealthCheck(t *testing.T) { + regionalHealthCheck := &composite.HealthCheck{ + Name: "regional-hc", + Version: meta.VersionGA, + Scope: meta.Regional, + } + globalHealthCheck := &composite.HealthCheck{ + Name: "global-hc", + Version: meta.VersionGA, + Scope: meta.Global, + } + + testCases := []struct { + existingHealthChecks []*composite.HealthCheck + getHCName string + getHCScope meta.KeyType + expectedHealthCheck *composite.HealthCheck + desc string + }{ + { + existingHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck}, + getHCName: regionalHealthCheck.Name, + getHCScope: regionalHealthCheck.Scope, + expectedHealthCheck: regionalHealthCheck, + desc: "Test getting regional health check", + }, + { + 
+			existingHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck},
+			getHCName:            globalHealthCheck.Name,
+			getHCScope:           globalHealthCheck.Scope,
+			expectedHealthCheck:  globalHealthCheck,
+			desc:                 "Test getting global health check",
+		},
+		{
+			existingHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck},
+			getHCName:            "non-existent-hc",
+			getHCScope:           meta.Global,
+			expectedHealthCheck:  nil,
+			desc:                 "Test getting non existent global health check",
+		},
+		{
+			existingHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck},
+			getHCName:            "non-existent-hc",
+			getHCScope:           meta.Regional,
+			expectedHealthCheck:  nil,
+			desc:                 "Test getting non existent regional health check",
+		},
+		{
+			existingHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck},
+			getHCName:            regionalHealthCheck.Name,
+			getHCScope:           meta.Global,
+			expectedHealthCheck:  nil,
+			desc:                 "Test getting existent regional health check, but providing global scope",
+		},
+		{
+			existingHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck},
+			getHCName:            globalHealthCheck.Name,
+			getHCScope:           meta.Regional,
+			expectedHealthCheck:  nil,
+			desc:                 "Test getting existent global health check, but providing regional scope",
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues())
+			for _, hc := range tc.existingHealthChecks {
+				mustCreateHealthCheck(t, fakeGCE, hc)
+			}
+			hcp := NewHealthChecks(fakeGCE, meta.VersionGA)
+
+			hc, err := hcp.Get(tc.getHCName, tc.getHCScope)
+			if err != nil {
+				t.Fatalf("hcp.Get(%v), returned error %v, want nil", tc.getHCName, err)
+			}
+
+			// Scope field gets removed (but region added), after creating health check
+			ignoreFields := cmpopts.IgnoreFields(composite.HealthCheck{}, "SelfLink", "Region", "Scope")
+			if !cmp.Equal(hc, tc.expectedHealthCheck, ignoreFields) {
+				diff := cmp.Diff(hc, tc.expectedHealthCheck, ignoreFields)
+				t.Errorf("hcp.Get(%s) returned %v, not equal to expectedHealthCheck %v, diff: %v", tc.getHCName, hc, tc.expectedHealthCheck, diff)
+			}
+		})
+	}
+}
+
+func TestDeleteHealthCheck(t *testing.T) {
+	regionalHealthCheck := &composite.HealthCheck{
+		Name:    "regional-hc",
+		Version: meta.VersionGA,
+		Scope:   meta.Regional,
+	}
+	globalHealthCheck := &composite.HealthCheck{
+		Name:    "global-hc",
+		Version: meta.VersionGA,
+		Scope:   meta.Global,
+	}
+
+	testCases := []struct {
+		existingHealthChecks    []*composite.HealthCheck
+		deleteHCName            string
+		deleteHCScope           meta.KeyType
+		shouldExistHealthChecks []*composite.HealthCheck
+		desc                    string
+	}{
+		{
+			existingHealthChecks:    []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck},
+			deleteHCName:            regionalHealthCheck.Name,
+			deleteHCScope:           regionalHealthCheck.Scope,
+			shouldExistHealthChecks: []*composite.HealthCheck{globalHealthCheck},
+			desc:                    "Delete regional health check",
+		},
+		{
+			existingHealthChecks:    []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck},
+			deleteHCName:            globalHealthCheck.Name,
+			deleteHCScope:           globalHealthCheck.Scope,
+			shouldExistHealthChecks: []*composite.HealthCheck{regionalHealthCheck},
+			desc:                    "Delete global health check",
+		},
+		{
+			existingHealthChecks:    []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck},
+			deleteHCName:            "non-existent",
+			deleteHCScope:           meta.Regional,
+			shouldExistHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck},
+			desc:                    "Delete non existent healthCheck",
+		},
+		{
+
existingHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck}, + deleteHCName: globalHealthCheck.Name, + deleteHCScope: meta.Regional, + shouldExistHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck}, + desc: "Delete global health check name, but using regional scope", + }, + { + existingHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck}, + deleteHCName: regionalHealthCheck.Name, + deleteHCScope: meta.Global, + shouldExistHealthChecks: []*composite.HealthCheck{regionalHealthCheck, globalHealthCheck}, + desc: "Delete regional health check name, but using global scope", + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) + for _, hc := range tc.existingHealthChecks { + mustCreateHealthCheck(t, fakeGCE, hc) + } + hc := NewHealthChecks(fakeGCE, meta.VersionGA) + + err := hc.Delete(tc.deleteHCName, tc.deleteHCScope) + if err != nil { + t.Fatalf("hc.Delete(%v), returned error %v, want nil", tc.deleteHCName, err) + } + + verifyHealthCheckNotExists(t, fakeGCE, tc.deleteHCName, tc.deleteHCScope) + for _, hc := range tc.shouldExistHealthChecks { + verifyHealthCheckExists(t, fakeGCE, hc.Name, hc.Scope) + } + }) + } +} + +func verifyHealthCheckExists(t *testing.T, cloud *gce.Cloud, name string, scope meta.KeyType) { + t.Helper() + verifyHealthCheckShouldExist(t, cloud, name, scope, true) +} + +func verifyHealthCheckNotExists(t *testing.T, cloud *gce.Cloud, name string, scope meta.KeyType) { + t.Helper() + verifyHealthCheckShouldExist(t, cloud, name, scope, false) +} + +func verifyHealthCheckShouldExist(t *testing.T, cloud *gce.Cloud, name string, scope meta.KeyType, shouldExist bool) { + t.Helper() + + key, err := composite.CreateKey(cloud, name, scope) + if err != nil { + t.Fatalf("Failed to create key for fetching health check %s, err: %v", name, err) + } + _, err = composite.GetHealthCheck(cloud, key, meta.VersionGA) + if err != nil { + if utils.IsNotFoundError(err) { + if shouldExist { + t.Errorf("Health check %s in scope %s was not found", name, scope) + } + return + } + t.Fatalf("composite.GetHealthCheck(_, %v, %v) returned error %v, want nil", key, meta.VersionGA, err) + } + if !shouldExist { + t.Errorf("Health Check %s in scope %s exists, expected to be not found", name, scope) + } +} + +func mustCreateHealthCheck(t *testing.T, cloud *gce.Cloud, hc *composite.HealthCheck) { + t.Helper() + + key, err := composite.CreateKey(cloud, hc.Name, hc.Scope) + if err != nil { + t.Fatalf("composite.CreateKey(_, %s, %s) returned error %v, want nil", hc.Name, hc.Scope, err) + } + err = composite.CreateHealthCheck(cloud, key, hc) + if err != nil { + t.Fatalf("composite.CreateHealthCheck(_, %s, %v) returned error %v, want nil", key, hc, err) + } +} diff --git a/pkg/l4lb/l4controller_test.go b/pkg/l4lb/l4controller_test.go index d49c989cb2..679a0e63d1 100644 --- a/pkg/l4lb/l4controller_test.go +++ b/pkg/l4lb/l4controller_test.go @@ -19,14 +19,13 @@ package l4lb import ( context2 "context" "fmt" - "k8s.io/ingress-gce/pkg/healthchecks" + "net/http" "testing" "time" + "k8s.io/ingress-gce/pkg/healthchecks_l4" "k8s.io/ingress-gce/pkg/loadbalancers" - "net/http" - "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock" @@ -71,7 +70,7 @@ func newServiceController(t *testing.T, fakeGCE *gce.Cloud) 
*L4Controller { for _, n := range nodes { ctx.NodeInformer.GetIndexer().Add(n) } - healthchecks.FakeL4(ctx.Cloud, ctx) + healthchecks_l4.Fake(ctx.Cloud, ctx) return NewILBController(ctx, stopCh) } diff --git a/pkg/l4lb/l4netlbcontroller_test.go b/pkg/l4lb/l4netlbcontroller_test.go index 31acee4f8f..eaf0a5acee 100644 --- a/pkg/l4lb/l4netlbcontroller_test.go +++ b/pkg/l4lb/l4netlbcontroller_test.go @@ -42,7 +42,7 @@ import ( "k8s.io/ingress-gce/pkg/annotations" "k8s.io/ingress-gce/pkg/composite" ingctx "k8s.io/ingress-gce/pkg/context" - "k8s.io/ingress-gce/pkg/healthchecks" + "k8s.io/ingress-gce/pkg/healthchecks_l4" "k8s.io/ingress-gce/pkg/loadbalancers" "k8s.io/ingress-gce/pkg/test" "k8s.io/ingress-gce/pkg/utils" @@ -240,7 +240,7 @@ func newL4NetLBServiceController() *L4NetLBController { for _, n := range nodes { ctx.NodeInformer.GetIndexer().Add(n) } - healthchecks.FakeL4(ctx.Cloud, ctx) + healthchecks_l4.Fake(ctx.Cloud, ctx) return NewL4NetLBController(ctx, stopCh) } @@ -873,7 +873,7 @@ func TestHealthCheckWhenExternalTrafficPolicyWasUpdated(t *testing.T) { // delete shared health check if is created, update service to Cluster and // check that non-shared health check was created hcNameShared, _ := lc.namer.L4HealthCheck(svc.Namespace, svc.Name, true) - healthchecks.FakeL4(lc.ctx.Cloud, lc.ctx).DeleteHealthCheck(svc, lc.namer, true, meta.Regional, utils.XLB) + healthchecks_l4.Fake(lc.ctx.Cloud, lc.ctx).DeleteHealthCheck(svc, lc.namer, true, meta.Regional, utils.XLB) // Update ExternalTrafficPolicy to Cluster check if shared HC was created err = updateAndAssertExternalTrafficPolicy(newSvc, lc, v1.ServiceExternalTrafficPolicyTypeCluster, hcNameShared) if err != nil { diff --git a/pkg/loadbalancers/l4.go b/pkg/loadbalancers/l4.go index 0938bfc1b2..6dc71121b6 100644 --- a/pkg/loadbalancers/l4.go +++ b/pkg/loadbalancers/l4.go @@ -32,7 +32,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/firewalls" "k8s.io/ingress-gce/pkg/forwardingrules" - "k8s.io/ingress-gce/pkg/healthchecks" + "k8s.io/ingress-gce/pkg/healthchecks_l4" "k8s.io/ingress-gce/pkg/metrics" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/namer" @@ -52,8 +52,8 @@ type L4 struct { Service *corev1.Service ServicePort utils.ServicePort NamespacedName types.NamespacedName - l4HealthChecks healthchecks.L4HealthChecks forwardingRules ForwardingRulesProvider + l4HealthChecks healthchecks_l4.L4HealthChecks } // L4ILBSyncResult contains information about the outcome of an L4 ILB sync. 
It stores the list of resource name annotations, @@ -76,7 +76,7 @@ func NewL4Handler(service *corev1.Service, cloud *gce.Cloud, scope meta.KeyType, namer: namer, recorder: recorder, Service: service, - l4HealthChecks: healthchecks.L4(), + l4HealthChecks: healthchecks_l4.GetInstance(), forwardingRules: forwardingrules.New(cloud, meta.VersionGA, scope), } l.NamespacedName = types.NamespacedName{Name: service.Name, Namespace: service.Namespace} diff --git a/pkg/loadbalancers/l4_test.go b/pkg/loadbalancers/l4_test.go index a91fbfb58f..8aa9629ce6 100644 --- a/pkg/loadbalancers/l4_test.go +++ b/pkg/loadbalancers/l4_test.go @@ -23,11 +23,10 @@ import ( "strings" "testing" - "k8s.io/ingress-gce/pkg/healthchecks" - "google.golang.org/api/compute/v1" "k8s.io/ingress-gce/pkg/backends" "k8s.io/ingress-gce/pkg/firewalls" + "k8s.io/ingress-gce/pkg/healthchecks_l4" "k8s.io/ingress-gce/pkg/utils" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" @@ -69,7 +68,7 @@ func TestEnsureInternalBackendServiceUpdates(t *testing.T) { svc := test.NewL4ILBService(false, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) bsName, _ := l.namer.L4Backend(l.Service.Namespace, l.Service.Name) _, err := l.backendPool.EnsureL4BackendService(bsName, "", "TCP", string(svc.Spec.SessionAffinity), string(cloud.SchemeInternal), l.NamespacedName, meta.VersionGA) @@ -120,7 +119,7 @@ func TestEnsureInternalLoadBalancer(t *testing.T) { svc := test.NewL4ILBService(false, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err) @@ -177,7 +176,7 @@ func TestEnsureInternalLoadBalancerTypeChange(t *testing.T) { svc := test.NewL4ILBService(false, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err) @@ -211,7 +210,7 @@ func TestEnsureInternalLoadBalancerWithExistingResources(t *testing.T) { namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err) @@ -254,7 +253,7 @@ func TestEnsureInternalLoadBalancerClearPreviousResources(t *testing.T) { svc := test.NewL4ILBService(true, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, 
&test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) if err != nil { @@ -374,7 +373,7 @@ func TestUpdateResourceLinks(t *testing.T) { svc := test.NewL4ILBService(true, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) if err != nil { @@ -452,7 +451,7 @@ func TestEnsureInternalLoadBalancerHealthCheckConfigurable(t *testing.T) { svc := test.NewL4ILBService(true, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) if err != nil { @@ -495,7 +494,7 @@ func TestEnsureInternalLoadBalancerDeleted(t *testing.T) { svc := test.NewL4ILBService(false, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err) @@ -527,7 +526,7 @@ func TestEnsureInternalLoadBalancerDeletedTwiceDoesNotError(t *testing.T) { svc := test.NewL4ILBService(false, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err) @@ -644,7 +643,7 @@ func TestHealthCheckFirewallDeletionWithNetLB(t *testing.T) { func ensureService(fakeGCE *gce.Cloud, namer *namer_util.L4Namer, nodeNames []string, zoneName string, port int, t *testing.T) (*v1.Service, *L4, *L4ILBSyncResult) { svc := test.NewL4ILBService(false, 8080) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, zoneName); err != nil { return nil, nil, &L4ILBSyncResult{Error: fmt.Errorf("Unexpected error when adding nodes %v", err)} @@ -669,7 +668,7 @@ func TestEnsureInternalLoadBalancerWithSpecialHealthCheck(t *testing.T) { svc := test.NewL4ILBService(false, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName); err != nil { 
t.Errorf("Unexpected error when adding nodes %v", err) @@ -776,7 +775,7 @@ func TestEnsureInternalLoadBalancerErrors(t *testing.T) { fakeGCE := getFakeGCECloud(gce.DefaultTestClusterValues()) l := NewL4Handler(params.service, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) //lbName := l.namer.L4Backend(params.service.Namespace, params.service.Name) frName := l.GetFRName() @@ -859,7 +858,7 @@ func TestEnsureInternalLoadBalancerEnableGlobalAccess(t *testing.T) { svc := test.NewL4ILBService(false, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err) @@ -941,7 +940,7 @@ func TestEnsureInternalLoadBalancerCustomSubnet(t *testing.T) { svc := test.NewL4ILBService(false, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err) @@ -1039,7 +1038,7 @@ func TestEnsureInternalFirewallPortRanges(t *testing.T) { svc := test.NewL4ILBService(false, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) fwName, _ := l.namer.L4Backend(l.Service.Namespace, l.Service.Name) tc := struct { @@ -1094,7 +1093,7 @@ func TestEnsureInternalLoadBalancerModifyProtocol(t *testing.T) { svc := test.NewL4ILBService(false, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName) if err != nil { @@ -1186,7 +1185,7 @@ func TestEnsureInternalLoadBalancerAllPorts(t *testing.T) { svc := test.NewL4ILBService(false, 8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l := NewL4Handler(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err) diff --git a/pkg/loadbalancers/l4netlb.go b/pkg/loadbalancers/l4netlb.go index 8e108b7184..bb77f432d8 100644 --- a/pkg/loadbalancers/l4netlb.go +++ b/pkg/loadbalancers/l4netlb.go @@ -31,7 +31,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/firewalls" "k8s.io/ingress-gce/pkg/forwardingrules" - 
"k8s.io/ingress-gce/pkg/healthchecks" + "k8s.io/ingress-gce/pkg/healthchecks_l4" "k8s.io/ingress-gce/pkg/metrics" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/namer" @@ -50,7 +50,7 @@ type L4NetLB struct { Service *corev1.Service ServicePort utils.ServicePort NamespacedName types.NamespacedName - l4HealthChecks healthchecks.L4HealthChecks + l4HealthChecks healthchecks_l4.L4HealthChecks forwardingRules ForwardingRulesProvider } @@ -91,7 +91,7 @@ func NewL4NetLB(service *corev1.Service, cloud *gce.Cloud, scope meta.KeyType, n Service: service, NamespacedName: types.NamespacedName{Name: service.Name, Namespace: service.Namespace}, backendPool: backends.NewPool(cloud, namer), - l4HealthChecks: healthchecks.L4(), + l4HealthChecks: healthchecks_l4.GetInstance(), forwardingRules: forwardingrules.New(cloud, meta.VersionGA, scope), } portId := utils.ServicePortID{Service: l4netlb.NamespacedName} diff --git a/pkg/loadbalancers/l4netlb_test.go b/pkg/loadbalancers/l4netlb_test.go index b91732ae53..cd2ffd58ae 100644 --- a/pkg/loadbalancers/l4netlb_test.go +++ b/pkg/loadbalancers/l4netlb_test.go @@ -21,10 +21,6 @@ import ( "strings" "testing" - "k8s.io/ingress-gce/pkg/firewalls" - "k8s.io/ingress-gce/pkg/flags" - "k8s.io/ingress-gce/pkg/healthchecks" - "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock" @@ -34,6 +30,9 @@ import ( servicehelper "k8s.io/cloud-provider/service/helpers" "k8s.io/ingress-gce/pkg/annotations" "k8s.io/ingress-gce/pkg/composite" + "k8s.io/ingress-gce/pkg/firewalls" + "k8s.io/ingress-gce/pkg/flags" + "k8s.io/ingress-gce/pkg/healthchecks_l4" "k8s.io/ingress-gce/pkg/metrics" "k8s.io/ingress-gce/pkg/test" "k8s.io/ingress-gce/pkg/utils" @@ -57,7 +56,7 @@ func TestEnsureL4NetLoadBalancer(t *testing.T) { namer := namer_util.NewL4Namer(kubeSystemUID, namer_util.NewNamer(vals.ClusterName, "cluster-fw")) l4netlb := NewL4NetLB(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l4netlb.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l4netlb.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l4netlb.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err) @@ -108,7 +107,7 @@ func TestDeleteL4NetLoadBalancer(t *testing.T) { namer := namer_util.NewL4Namer(kubeSystemUID, namer_util.NewNamer(vals.ClusterName, "cluster-fw")) l4NetLB := NewL4NetLB(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l4NetLB.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l4NetLB.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l4NetLB.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err) @@ -210,7 +209,7 @@ func ensureLoadBalancer(port int, vals gce.TestClusterValues, fakeGCE *gce.Cloud emptyNodes := []string{} l4NetLB := NewL4NetLB(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l4NetLB.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l4NetLB.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) result := l4NetLB.EnsureFrontend(emptyNodes, svc) if result.Error != nil { @@ -353,7 +352,7 @@ func TestMetricsForStandardNetworkTier(t *testing.T) { namer := namer_util.NewL4Namer(kubeSystemUID, 
namer_util.NewNamer(vals.ClusterName, "cluster-fw")) l4netlb := NewL4NetLB(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l4netlb.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l4netlb.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l4netlb.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err) @@ -400,7 +399,7 @@ func TestEnsureNetLBFirewallDestinations(t *testing.T) { svc := test.NewL4NetLBRBSService(8080) namer := namer_util.NewL4Namer(kubeSystemUID, nil) l4netlb := NewL4NetLB(svc, fakeGCE, meta.Regional, namer, record.NewFakeRecorder(100)) - l4netlb.l4HealthChecks = healthchecks.FakeL4(fakeGCE, &test.FakeRecorderSource{}) + l4netlb.l4HealthChecks = healthchecks_l4.Fake(fakeGCE, &test.FakeRecorderSource{}) if _, err := test.CreateAndInsertNodes(l4netlb.cloud, nodeNames, vals.ZoneName); err != nil { t.Errorf("Unexpected error when adding nodes %v", err)