Skip to content

Commit

Permalink
unit tests for L4 ILB
Browse files Browse the repository at this point in the history
Most of the tests in l4_test.go are from gce_loadbalancer_internal_test.go
  • Loading branch information
prameshj committed Feb 6, 2020
1 parent 0c9afbd commit 734e3d5
Show file tree
Hide file tree
Showing 7 changed files with 1,599 additions and 2 deletions.
10 changes: 8 additions & 2 deletions pkg/backends/syncer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,12 @@ func (p *portset) check(fakeGCE *gce.Cloud) error {
return fmt.Errorf("backend for port %+v should exist, but got: %v", sp.NodePort, err)
}
} else {
if bs, err := composite.GetBackendService(fakeGCE, key, features.VersionFromServicePort(&sp)); !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
bs, err := composite.GetBackendService(fakeGCE, key, features.VersionFromServicePort(&sp))
if err == nil || !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
if sp.PrimaryIPNEGEnabled {
// It is expected that these Backends should not get cleaned up in the GC loop.
continue
}
return fmt.Errorf("backend for port %+v should not exist, but got %v", sp, bs)
}
}
Expand Down Expand Up @@ -333,7 +338,7 @@ func TestGC(t *testing.T) {
}
}

// Test GC with both ELB and ILBs
// Test GC with both ELB and ILBs. Add in an L4 ILB NEG which should not be deleted as part of GC.
func TestGCMixed(t *testing.T) {
fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues())
syncer := newTestSyncer(fakeGCE)
Expand All @@ -345,6 +350,7 @@ func TestGCMixed(t *testing.T) {
{NodePort: 84, Protocol: annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer},
{NodePort: 85, Protocol: annotations.ProtocolHTTPS, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer},
{NodePort: 86, Protocol: annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer},
{ID: utils.ServicePortID{Service: types.NamespacedName{Name: "testsvc"}}, PrimaryIPNEGEnabled: true, BackendNamer: defaultNamer},
}
ps := newPortset(svcNodePorts)
if err := ps.add(svcNodePorts); err != nil {
Expand Down
105 changes: 105 additions & 0 deletions pkg/healthchecks/healthchecks_l4_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package healthchecks

import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/ingress-gce/pkg/composite"
"testing"
)

// TestMergeHealthChecks verifies that mergeHealthChecks reconciles values that
// are below the GCE defaults but preserves larger, user-configured values.
func TestMergeHealthChecks(t *testing.T) {
	t.Parallel()
	for _, tc := range []struct {
		desc                   string
		checkIntervalSec       int64
		timeoutSec             int64
		healthyThreshold       int64
		unhealthyThreshold     int64
		wantCheckIntervalSec   int64
		wantTimeoutSec         int64
		wantHealthyThreshold   int64
		wantUnhealthyThreshold int64
	}{
		{"unchanged", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
		{"interval - too small - should reconcile", gceHcCheckIntervalSeconds - 1, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
		{"timeout - too small - should reconcile", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds - 1, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
		{"healthy threshold - too small - should reconcile", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold - 1, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
		{"unhealthy threshold - too small - should reconcile", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold - 1, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
		{"interval - user configured - should keep", gceHcCheckIntervalSeconds + 1, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds + 1, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
		{"timeout - user configured - should keep", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds + 1, gceHcHealthyThreshold, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds + 1, gceHcHealthyThreshold, gceHcUnhealthyThreshold},
		{"healthy threshold - user configured - should keep", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold + 1, gceHcUnhealthyThreshold, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold + 1, gceHcUnhealthyThreshold},
		{"unhealthy threshold - user configured - should keep", gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold + 1, gceHcCheckIntervalSeconds, gceHcTimeoutSeconds, gceHcHealthyThreshold, gceHcUnhealthyThreshold + 1},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			// wantHC starts out with the default settings; mergeHealthChecks
			// mutates it in place based on the existing check hc.
			wantHC := NewL4HealthCheck("hc", types.NamespacedName{Name: "svc", Namespace: "default"}, false, "/", 12345)
			hc := &composite.HealthCheck{
				CheckIntervalSec:   tc.checkIntervalSec,
				TimeoutSec:         tc.timeoutSec,
				HealthyThreshold:   tc.healthyThreshold,
				UnhealthyThreshold: tc.unhealthyThreshold,
			}
			mergeHealthChecks(hc, wantHC)
			// Bug fix: the failure messages previously printed the input values
			// (tc.checkIntervalSec etc.) instead of the expected want* values.
			if wantHC.CheckIntervalSec != tc.wantCheckIntervalSec {
				t.Errorf("wantHC.CheckIntervalSec = %d; want %d", wantHC.CheckIntervalSec, tc.wantCheckIntervalSec)
			}
			if wantHC.TimeoutSec != tc.wantTimeoutSec {
				t.Errorf("wantHC.TimeoutSec = %d; want %d", wantHC.TimeoutSec, tc.wantTimeoutSec)
			}
			if wantHC.HealthyThreshold != tc.wantHealthyThreshold {
				t.Errorf("wantHC.HealthyThreshold = %d; want %d", wantHC.HealthyThreshold, tc.wantHealthyThreshold)
			}
			if wantHC.UnhealthyThreshold != tc.wantUnhealthyThreshold {
				t.Errorf("wantHC.UnhealthyThreshold = %d; want %d", wantHC.UnhealthyThreshold, tc.wantUnhealthyThreshold)
			}
		})
	}
}

// TestCompareHealthChecks exercises needToUpdateHealthChecks by mutating a
// health check in various ways relative to the desired one and checking
// whether an update is reported.
func TestCompareHealthChecks(t *testing.T) {
	t.Parallel()
	// newDefaultHC builds a fresh health check with the default L4 settings.
	newDefaultHC := func() *composite.HealthCheck {
		return NewL4HealthCheck("hc", types.NamespacedName{Name: "svc", Namespace: "default"}, false, "/", 12345)
	}
	cases := []struct {
		desc        string
		mutate      func(*composite.HealthCheck)
		wantChanged bool
	}{
		{"unchanged", nil, false},
		{"nil HttpHealthCheck", func(hc *composite.HealthCheck) { hc.HttpHealthCheck = nil }, true},
		{"desc does not match", func(hc *composite.HealthCheck) { hc.Description = "bad-desc" }, true},
		{"port does not match", func(hc *composite.HealthCheck) { hc.HttpHealthCheck.Port = 54321 }, true},
		{"requestPath does not match", func(hc *composite.HealthCheck) { hc.HttpHealthCheck.RequestPath = "/anotherone" }, true},
		{"interval needs update", func(hc *composite.HealthCheck) { hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1 }, true},
		{"timeout needs update", func(hc *composite.HealthCheck) { hc.TimeoutSec = gceHcTimeoutSeconds - 1 }, true},
		{"healthy threshold needs update", func(hc *composite.HealthCheck) { hc.HealthyThreshold = gceHcHealthyThreshold - 1 }, true},
		{"unhealthy threshold needs update", func(hc *composite.HealthCheck) { hc.UnhealthyThreshold = gceHcUnhealthyThreshold - 1 }, true},
		{"interval does not need update", func(hc *composite.HealthCheck) { hc.CheckIntervalSec = gceHcCheckIntervalSeconds + 1 }, false},
		{"timeout does not need update", func(hc *composite.HealthCheck) { hc.TimeoutSec = gceHcTimeoutSeconds + 1 }, false},
		{"healthy threshold does not need update", func(hc *composite.HealthCheck) { hc.HealthyThreshold = gceHcHealthyThreshold + 1 }, false},
		{"unhealthy threshold does not need update", func(hc *composite.HealthCheck) { hc.UnhealthyThreshold = gceHcUnhealthyThreshold + 1 }, false},
	}
	for _, c := range cases {
		t.Run(c.desc, func(t *testing.T) {
			existing := newDefaultHC()
			desired := newDefaultHC()
			if c.mutate != nil {
				c.mutate(existing)
			}
			if gotChanged := needToUpdateHealthChecks(existing, desired); gotChanged != c.wantChanged {
				t.Errorf("needToUpdateHealthChecks(%#v, %#v) = %t; want changed = %t", existing, desired, gotChanged, c.wantChanged)
			}
		})
	}
}
229 changes: 229 additions & 0 deletions pkg/l4controller/l4controller_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,229 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package l4controller

import (
"k8s.io/client-go/kubernetes"
testing2 "k8s.io/client-go/testing"
"k8s.io/ingress-gce/pkg/loadbalancers"
"k8s.io/ingress-gce/pkg/neg/types"
"reflect"
"testing"
"time"

"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
api_v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/ingress-gce/pkg/composite"
"k8s.io/ingress-gce/pkg/context"
"k8s.io/ingress-gce/pkg/test"
"k8s.io/ingress-gce/pkg/utils/common"
"k8s.io/ingress-gce/pkg/utils/namer"
"k8s.io/legacy-cloud-providers/gce"
)

const (
clusterUID = "aaaaa"
resetLBStatus = "{\"status\":{\"loadBalancer\":{\"ingress\":null}}}"
)

// newServiceController builds an L4Controller wired to a fake kube client and
// a fake GCE cloud, suitable for driving its sync() method directly in tests.
func newServiceController() *L4Controller {
	kubeClient := fake.NewSimpleClientset()
	fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues())
	// Auto-assign an IP on forwarding rule insert, mimicking real GCE behavior.
	(fakeGCE.Compute().(*cloud.MockGCE)).MockForwardingRules.InsertHook = loadbalancers.InsertForwardingRuleHook

	namer := namer.NewNamer(clusterUID, "")

	// NOTE(review): stopCh is never closed; acceptable for short-lived tests,
	// but any goroutines tied to it will outlive the test.
	stopCh := make(chan struct{})
	ctxConfig := context.ControllerContextConfig{
		Namespace:    api_v1.NamespaceAll,
		ResyncPeriod: 1 * time.Minute,
	}
	ctx := context.NewControllerContext(nil, kubeClient, nil, nil, fakeGCE, namer, "" /*kubeSystemUID*/, ctxConfig)
	return NewL4Controller(ctx, stopCh)
}

// addILBService creates svc in the fake API server and seeds the service
// informer cache so the controller's sync loop can find it.
func addILBService(l4c *L4Controller, svc *api_v1.Service) {
	// Errors ignored: setup against fake clients is not expected to fail.
	l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Create(svc)
	l4c.ctx.ServiceInformer.GetIndexer().Add(svc)
}

// updateILBService writes the updated svc to both the fake API server and the
// service informer cache, keeping the two views consistent.
func updateILBService(l4c *L4Controller, svc *api_v1.Service) {
	// Errors ignored: setup against fake clients is not expected to fail.
	l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Update(svc)
	l4c.ctx.ServiceInformer.GetIndexer().Update(svc)
}

// deleteILBService removes svc from both the fake API server and the service
// informer cache.
func deleteILBService(l4c *L4Controller, svc *api_v1.Service) {
	// Errors ignored: teardown against fake clients is not expected to fail.
	l4c.ctx.KubeClient.CoreV1().Services(svc.Namespace).Delete(svc.Name, &v1.DeleteOptions{})
	l4c.ctx.ServiceInformer.GetIndexer().Delete(svc)
}

// addNEG creates a fake NEG for svc in test zone 1.
func addNEG(l4c *L4Controller, svc *api_v1.Service) {
	// Also create a fake NEG for this service since the sync code will try to link the backend service to NEG
	negName := l4c.ctx.ClusterNamer.PrimaryIPNEG(svc.Namespace, svc.Name)
	neg := &composite.NetworkEndpointGroup{Name: negName}
	key := meta.ZonalKey(negName, types.TestZone1)
	// Error ignored: creation against the fake GCE cloud is not expected to fail.
	composite.CreateNetworkEndpointGroup(l4c.ctx.Cloud, key, neg)
}

// getKeyForSvc returns the cache key for the given service, failing the test
// immediately if the key cannot be computed.
func getKeyForSvc(svc *api_v1.Service, t *testing.T) string {
	svcKey, keyErr := common.KeyFunc(svc)
	if keyErr != nil {
		t.Fatalf("Failed to get key for service %v, err : %v", svc, keyErr)
	}
	return svcKey
}

// validatePatchRequest validates that the given client patched the resource with the given change.
// This is needed because there is a bug in go-client test implementation where a patch operation cannot be used
// to delete fields - https://github.com/kubernetes/client-go/issues/607
// TODO remove this once https://github.com/kubernetes/client-go/issues/607 has been fixed.
func validatePatchRequest(client kubernetes.Interface, patchVal string, t *testing.T) {
	fakeClient := client.(*fake.Clientset)
	actionLen := len(fakeClient.Actions())
	if actionLen == 0 {
		// Fatalf, not Errorf: indexing Actions()[actionLen-1] below would
		// panic with an out-of-range index if we continued.
		t.Fatalf("Expected at least one action in fake client")
	}
	// The latest action should be the one setting status to the given value.
	// Use a checked type assertion so a non-patch action fails the test
	// cleanly instead of panicking.
	patchAction, ok := fakeClient.Actions()[actionLen-1].(testing2.PatchAction)
	if !ok {
		t.Fatalf("Expected last action to be a patch, got %+v", fakeClient.Actions()[actionLen-1])
	}
	if !reflect.DeepEqual(patchAction.GetPatch(), []byte(patchVal)) {
		t.Errorf("Expected patch '%s', got '%s'", patchVal, string(patchAction.GetPatch()))
	}
}

// validateSvcStatus asserts that the presence of the V2 ILB finalizer and of a
// LoadBalancer ingress IP on svc both match expectStatus.
func validateSvcStatus(svc *api_v1.Service, expectStatus bool, t *testing.T) {
	hasFinalizer := common.HasGivenFinalizer(svc.ObjectMeta, common.ILBFinalizerV2)
	if hasFinalizer != expectStatus {
		t.Fatalf("Expected L4 finalizer present to be %v, but it was %v", expectStatus, !expectStatus)
	}
	ingress := svc.Status.LoadBalancer.Ingress
	// When status is expected, there must be at least one ingress entry with an IP.
	if expectStatus && (len(ingress) == 0 || ingress[0].IP == "") {
		t.Fatalf("Invalid LoadBalancer status field in service - %+v", svc.Status.LoadBalancer)
	}
	if !expectStatus && len(ingress) > 0 {
		// TODO uncomment below once https://github.com/kubernetes/client-go/issues/607 has been fixed.
		// t.Fatalf("Expected LoadBalancer status to be empty, Got %v", svc.Status.LoadBalancer)
	}
}

// TestProcessCreateOrUpdate verifies the processing loop in L4Controller.
// This test adds a new service, then performs a valid update and then modifies the service type to External and ensures
// that the status field is as expected in each case.
func TestProcessCreateOrUpdate(t *testing.T) {
	l4c := newServiceController()
	newSvc := test.NewL4ILBService(false, 8080)
	// Capture the name up front so failure messages never dereference a nil
	// service after a failed lookup reassigns newSvc.
	svcName := newSvc.Name
	addILBService(l4c, newSvc)
	// The sync path links the L4 backend service to a NEG, so create one.
	addNEG(l4c, newSvc)
	err := l4c.sync(getKeyForSvc(newSvc, t))
	if err != nil {
		t.Errorf("Failed to sync newly added service %s, err %v", svcName, err)
	}
	// List the service and ensure that it contains the finalizer as well as Status field.
	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(svcName, v1.GetOptions{})
	if err != nil {
		// Fatalf, not Errorf: newSvc is nil here and is dereferenced below.
		t.Fatalf("Failed to lookup service %s, err: %v", svcName, err)
	}
	validateSvcStatus(newSvc, true, t)

	// set the TrafficPolicy of the service to Local
	newSvc.Spec.ExternalTrafficPolicy = api_v1.ServiceExternalTrafficPolicyTypeLocal
	updateILBService(l4c, newSvc)
	if err = l4c.sync(getKeyForSvc(newSvc, t)); err != nil {
		t.Errorf("Failed to sync updated service %s, err %v", svcName, err)
	}
	// List the service and ensure that it contains the finalizer as well as Status field.
	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(svcName, v1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to lookup service %s, err: %v", svcName, err)
	}
	validateSvcStatus(newSvc, true, t)

	// Remove the Internal LoadBalancer annotation, this should trigger a cleanup.
	delete(newSvc.Annotations, gce.ServiceAnnotationLoadBalancerType)
	updateILBService(l4c, newSvc)
	if err = l4c.sync(getKeyForSvc(newSvc, t)); err != nil {
		t.Errorf("Failed to sync updated service %s, err %v", svcName, err)
	}
	// TODO remove this once https://github.com/kubernetes/client-go/issues/607 has been fixed.
	validatePatchRequest(l4c.client, resetLBStatus, t)
	// List the service and ensure that the finalizer and status have been removed.
	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(svcName, v1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to lookup service %s, err: %v", svcName, err)
	}
	validateSvcStatus(newSvc, false, t)
}

// TestProcessDeletion verifies that a service marked for deletion has its
// LoadBalancer status reset and the V2 ILB finalizer removed.
func TestProcessDeletion(t *testing.T) {
	l4c := newServiceController()
	newSvc := test.NewL4ILBService(false, 8080)
	// Capture the name up front so failure messages never dereference a nil
	// service after a failed lookup reassigns newSvc.
	svcName := newSvc.Name
	addILBService(l4c, newSvc)
	addNEG(l4c, newSvc)
	err := l4c.sync(getKeyForSvc(newSvc, t))
	if err != nil {
		t.Errorf("Failed to sync newly added service %s, err %v", svcName, err)
	}
	// List the service and ensure that it contains the finalizer as well as Status field.
	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(svcName, v1.GetOptions{})
	if err != nil {
		// Fatalf, not Errorf: newSvc is nil here and is dereferenced below.
		t.Fatalf("Failed to lookup service %s, err: %v", svcName, err)
	}
	validateSvcStatus(newSvc, true, t)

	// Mark the service for deletion by updating timestamp. Use svc instead of newSvc since that has the finalizer.
	newSvc.DeletionTimestamp = &v1.Time{}
	updateILBService(l4c, newSvc)
	if err = l4c.sync(getKeyForSvc(newSvc, t)); err != nil {
		t.Errorf("Failed to sync updated service %s, err %v", svcName, err)
	}
	// TODO remove this once https://github.com/kubernetes/client-go/issues/607 has been fixed.
	validatePatchRequest(l4c.client, resetLBStatus, t)
	// List the service and ensure that the finalizer and status have been removed.
	newSvc, err = l4c.client.CoreV1().Services(newSvc.Namespace).Get(svcName, v1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to lookup service %s, err: %v", svcName, err)
	}
	validateSvcStatus(newSvc, false, t)
	deleteILBService(l4c, newSvc)
	// NOTE(review): relies on the fake client returning a nil object for a
	// missing service; the lookup error is intentionally ignored.
	newSvc, _ = l4c.client.CoreV1().Services(newSvc.Namespace).Get(svcName, v1.GetOptions{})
	if newSvc != nil {
		t.Errorf("Expected service to be deleted, but was found - %v", newSvc)
	}
}

// TestProcessCreateLegacyService ensures that a service carrying the legacy
// ILB finalizer is skipped by this controller: neither the V2 finalizer nor
// the LoadBalancer status may be set after a sync.
func TestProcessCreateLegacyService(t *testing.T) {
	l4c := newServiceController()
	newSvc := test.NewL4ILBService(false, 8080)
	// Set the legacy finalizer
	newSvc.Finalizers = append(newSvc.Finalizers, common.LegacyILBFinalizer)
	addILBService(l4c, newSvc)
	if err := l4c.sync(getKeyForSvc(newSvc, t)); err != nil {
		t.Errorf("Failed to sync newly added service %s, err %v", newSvc.Name, err)
	}
	// List the service and ensure that the status field is not updated.
	svc, err := l4c.client.CoreV1().Services(newSvc.Namespace).Get(newSvc.Name, v1.GetOptions{})
	if err != nil {
		// Fatalf, not Errorf: svc is nil here and is dereferenced below.
		t.Fatalf("Failed to lookup service %s, err: %v", newSvc.Name, err)
	}
	validateSvcStatus(svc, false, t)
}
7 changes: 7 additions & 0 deletions pkg/loadbalancers/fakes.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,3 +43,10 @@ func InsertGlobalForwardingRuleHook(ctx context.Context, key *meta.Key, obj *com
}
return false, nil
}

// InsertForwardingRuleHook is a MockForwardingRules insert hook that assigns a
// placeholder IP address when none was specified, mimicking GCE's
// auto-allocation of an IP for a forwarding rule. Returning false lets the
// mock's default insert logic proceed.
func InsertForwardingRuleHook(ctx context.Context, key *meta.Key, obj *compute.ForwardingRule, m *cloud.MockForwardingRules) (bool, error) {
	if obj.IPAddress == "" {
		obj.IPAddress = "10.0.0.1"
	}
	return false, nil
}
Loading

0 comments on commit 734e3d5

Please sign in to comment.