diff --git a/cmd/glbc/main.go b/cmd/glbc/main.go index 1d23dc0548..870b092a13 100644 --- a/cmd/glbc/main.go +++ b/cmd/glbc/main.go @@ -24,6 +24,8 @@ import ( "time" flag "github.com/spf13/pflag" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/ingress-gce/pkg/frontendconfig" "k8s.io/klog" @@ -123,6 +125,16 @@ func main() { klog.V(0).Infof("Cluster name: %+v", namer.UID()) } + var kubeSystemUID types.UID + if flags.F.EnableV2FrontendNamer { + // Get kube-system UID that will be used for v2 frontend naming scheme. + kubeSystemNS, err := kubeClient.CoreV1().Namespaces().Get("kube-system", metav1.GetOptions{}) + if err != nil { + klog.Fatalf("Error getting kube-system namespace: %v", err) + } + kubeSystemUID = kubeSystemNS.GetUID() + } + cloud := app.NewGCEClient() defaultBackendServicePort := app.DefaultBackendServicePort(kubeClient) ctxConfig := ingctx.ControllerContextConfig{ @@ -136,7 +148,7 @@ func main() { ASMConfigMapNamespace: flags.F.ASMConfigMapBasedConfigNamespace, ASMConfigMapName: flags.F.ASMConfigMapBasedConfigCMName, } - ctx := ingctx.NewControllerContext(kubeConfig, kubeClient, backendConfigClient, frontendConfigClient, cloud, namer, ctxConfig) + ctx := ingctx.NewControllerContext(kubeConfig, kubeClient, backendConfigClient, frontendConfigClient, cloud, namer, kubeSystemUID, ctxConfig) go app.RunHTTPServer(ctx.HealthCheck) if !flags.F.LeaderElection.LeaderElect { diff --git a/pkg/context/context.go b/pkg/context/context.go index 0e5802d866..8122341cf5 100644 --- a/pkg/context/context.go +++ b/pkg/context/context.go @@ -20,6 +20,7 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" informerv1 "k8s.io/client-go/informers/core/v1" @@ -55,7 +56,8 @@ type ControllerContext struct { Cloud *gce.Cloud - ClusterNamer *namer.Namer + ClusterNamer *namer.Namer + KubeSystemUID types.UID ControllerContextConfig ASMConfigController *cmconfig.ConfigMapConfigController @@ -100,6 +102,7 @@ func NewControllerContext( frontendConfigClient frontendconfigclient.Interface, cloud *gce.Cloud, namer *namer.Namer, + kubeSystemUID types.UID, config ControllerContextConfig) *ControllerContext { context := &ControllerContext{ @@ -107,6 +110,7 @@ func NewControllerContext( KubeClient: kubeClient, Cloud: cloud, ClusterNamer: namer, + KubeSystemUID: kubeSystemUID, ControllerContextConfig: config, IngressInformer: informerv1beta1.NewIngressInformer(kubeClient, config.Namespace, config.ResyncPeriod, utils.NewNamespaceIndexer()), ServiceInformer: informerv1.NewServiceInformer(kubeClient, config.Namespace, config.ResyncPeriod, utils.NewNamespaceIndexer()), diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index b71a2fe1bc..9aff0cbeaa 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -115,7 +115,7 @@ func NewLoadBalancerController( hasSynced: ctx.HasSynced, nodes: NewNodeController(ctx, instancePool), instancePool: instancePool, - l7Pool: loadbalancers.NewLoadBalancerPool(ctx.Cloud, ctx.ClusterNamer, ctx, namer.NewFrontendNamerFactory(ctx.ClusterNamer)), + l7Pool: loadbalancers.NewLoadBalancerPool(ctx.Cloud, ctx.ClusterNamer, ctx, namer.NewFrontendNamerFactory(ctx.ClusterNamer, ctx.KubeSystemUID)), backendSyncer: backends.NewBackendSyncer(backendPool, healthChecker, ctx.Cloud), negLinker: backends.NewNEGLinker(backendPool, negtypes.NewAdapter(ctx.Cloud), ctx.Cloud), igLinker: 
backends.NewInstanceGroupLinker(instancePool, backendPool), @@ -330,9 +330,10 @@ func (lbc *LoadBalancerController) Stop(deleteAll bool) error { // TODO(rramkumar): Do we need deleteAll? Can we get rid of its' flag? if deleteAll { klog.Infof("Shutting down cluster manager.") - if err := lbc.l7Pool.Shutdown(); err != nil { + if err := lbc.l7Pool.Shutdown(lbc.ctx.Ingresses().List()); err != nil { return err } + // The backend pool will also delete instance groups. return lbc.backendSyncer.Shutdown() } @@ -453,29 +454,46 @@ func (lbc *LoadBalancerController) SyncLoadBalancer(state interface{}) error { return nil } -// GCLoadBalancers implements Controller. -func (lbc *LoadBalancerController) GCLoadBalancers(toKeep []*v1beta1.Ingress) error { - // Only GCE ingress associated resources are managed by this controller. - GCEIngresses := operator.Ingresses(toKeep).Filter(utils.IsGCEIngress).AsList() - return lbc.l7Pool.GC(common.ToIngressKeys(GCEIngresses)) +// GCv1LoadBalancers implements Controller. +func (lbc *LoadBalancerController) GCv1LoadBalancers(toKeep []*v1beta1.Ingress) error { + return lbc.l7Pool.GCv1(common.ToIngressKeys(toKeep)) } -// MaybeRemoveFinalizers cleans up Finalizers if needed. -func (lbc *LoadBalancerController) MaybeRemoveFinalizers(toCleanup []*v1beta1.Ingress) error { +// GCv2LoadBalancer implements Controller. +func (lbc *LoadBalancerController) GCv2LoadBalancer(ing *v1beta1.Ingress) error { + return lbc.l7Pool.GCv2(ing) +} + +// EnsureDeleteV1Finalizers implements Controller. +func (lbc *LoadBalancerController) EnsureDeleteV1Finalizers(toCleanup []*v1beta1.Ingress) error { if !flags.F.FinalizerRemove { klog.V(4).Infof("Removing finalizers not enabled") return nil } for _, ing := range toCleanup { ingClient := lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace) - if err := common.RemoveFinalizer(ing, ingClient); err != nil { - klog.Errorf("Failed to remove Finalizer from Ingress %s/%s: %v", ing.Namespace, ing.Name, err) + if err := common.EnsureDeleteFinalizer(ing, ingClient, common.FinalizerKey); err != nil { + klog.Errorf("Failed to ensure delete finalizer %s for ingress %s: %v", common.FinalizerKey, common.NamespacedName(ing), err) return err } } return nil } +// EnsureDeleteV2Finalizer implements Controller. +func (lbc *LoadBalancerController) EnsureDeleteV2Finalizer(ing *v1beta1.Ingress) error { + if !flags.F.FinalizerRemove { + klog.V(4).Infof("Removing finalizers not enabled") + return nil + } + ingClient := lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace) + if err := common.EnsureDeleteFinalizer(ing, ingClient, common.FinalizerKeyV2); err != nil { + klog.Errorf("Failed to ensure delete finalizer %s for ingress %s: %v", common.FinalizerKeyV2, common.NamespacedName(ing), err) + return err + } + return nil +} + // PostProcess implements Controller. func (lbc *LoadBalancerController) PostProcess(state interface{}) error { // We expect state to be a syncState @@ -501,20 +519,24 @@ func (lbc *LoadBalancerController) sync(key string) error { return fmt.Errorf("error getting Ingress for key %s: %v", key, err) } - // Snapshot of list of ingresses. + // Capture GC state for ingress. allIngresses := lbc.ctx.Ingresses().List() + // Determine if the ingress needs to be GCed. if !ingExists || utils.NeedsCleanup(ing) { + frontendGCAlgorithm := frontendGCAlgorithm(ingExists, ing) + klog.V(3).Infof("Using algorithm %v to GC ingress %v", frontendGCAlgorithm, ing) // GC will find GCE resources that were used for this ingress and delete them. 
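Both EnsureDeleteV1Finalizers and EnsureDeleteV2Finalizer above funnel into common.EnsureDeleteFinalizer with the v1 or v2 key. As a rough sketch of what that remove-then-update pattern amounts to with the context-free client-go vintage used in this repo (illustrative only; the real helper in pkg/utils/common may differ, for example by retrying on conflicts):

import (
	"k8s.io/api/networking/v1beta1"
	networkingclient "k8s.io/client-go/kubernetes/typed/networking/v1beta1"
)

// removeIngressFinalizer is an illustrative stand-in for common.EnsureDeleteFinalizer:
// drop the given key from the ingress finalizer list and push the update.
func removeIngressFinalizer(ing *v1beta1.Ingress, ingClient networkingclient.IngressInterface, key string) error {
	remaining := make([]string, 0, len(ing.GetFinalizers()))
	for _, f := range ing.GetFinalizers() {
		if f != key {
			remaining = append(remaining, f)
		}
	}
	if len(remaining) == len(ing.GetFinalizers()) {
		// Finalizer already absent; nothing to update.
		return nil
	}
	updated := ing.DeepCopy()
	updated.SetFinalizers(remaining)
	_, err := ingClient.Update(updated)
	return err
}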
- return lbc.ingSyncer.GC(allIngresses) + err := lbc.ingSyncer.GC(allIngresses, ing, frontendGCAlgorithm) + if err != nil { + lbc.ctx.Recorder(ing.Namespace).Eventf(ing, apiv1.EventTypeWarning, "GC", fmt.Sprintf("Error during GC: %v", err)) + } + return err } - // Get ingress and DeepCopy for assurance that we don't pollute other goroutines with changes. - ing = ing.DeepCopy() - ingClient := lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace) + // Ensure that a finalizer is attached. if flags.F.FinalizerAdd { - if err := common.AddFinalizer(ing, ingClient); err != nil { - klog.Errorf("Failed to add Finalizer to Ingress %q: %v", key, err) + if err = lbc.ensureFinalizer(ing); err != nil { return err } } @@ -538,7 +560,10 @@ func (lbc *LoadBalancerController) sync(key string) error { // Garbage collection will occur regardless of an error occurring. If an error occurred, // it could have been caused by quota issues; therefore, garbage collecting now may // free up enough quota for the next sync to pass. - if gcErr := lbc.ingSyncer.GC(allIngresses); gcErr != nil { + frontendGCAlgorithm := frontendGCAlgorithm(ingExists, ing) + klog.V(3).Infof("Using algorithm %v to GC ingress %v", frontendGCAlgorithm, ing) + if gcErr := lbc.ingSyncer.GC(allIngresses, ing, frontendGCAlgorithm); gcErr != nil { + lbc.ctx.Recorder(ing.Namespace).Eventf(ing, apiv1.EventTypeWarning, "GC", fmt.Sprintf("Error during GC: %v", gcErr)) return fmt.Errorf("error during sync %v, error during GC %v", syncErr, gcErr) } @@ -649,3 +674,91 @@ func (lbc *LoadBalancerController) ToSvcPorts(ings []*v1beta1.Ingress) []utils.S } return knownPorts } + +// defaultFrontendNamingScheme returns frontend naming scheme for an ingress without finalizer. +// This is used for adding an appropriate finalizer on the ingress. +func (lbc *LoadBalancerController) defaultFrontendNamingScheme(ing *v1beta1.Ingress) (namer.Scheme, error) { + // Ingress frontend naming scheme is determined based on the following logic, + // V2 frontend namer is disabled : v1 frontend naming scheme + // V2 frontend namer is enabled + // - VIP does not exists : v2 frontend naming scheme + // - VIP exists + // - GCE URL Map exists : v1 frontend naming scheme + // - GCE URL Map does not exists : v2 frontend naming scheme + if !flags.F.EnableV2FrontendNamer { + return namer.V1NamingScheme, nil + } + if !utils.HasVIP(ing) { + return namer.V2NamingScheme, nil + } + urlMapExists, err := lbc.l7Pool.HasUrlMap(ing) + if err != nil { + return "", err + } + if urlMapExists { + return namer.V1NamingScheme, nil + } + return namer.V2NamingScheme, nil +} + +// ensureFinalizer ensures that a finalizer is attached. +func (lbc *LoadBalancerController) ensureFinalizer(ing *v1beta1.Ingress) error { + ingKey := common.NamespacedName(ing) + if common.HasFinalizer(ing.ObjectMeta) { + klog.V(4).Infof("Finalizer exists for ingress %s", ingKey) + return nil + } + // Get ingress and DeepCopy for assurance that we don't pollute other goroutines with changes. 
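defaultFrontendNamingScheme above keys off utils.HasVIP, which is not part of this patch. The tests added below model "VIP exists" by populating Status.LoadBalancer.Ingress with an IP, so a stand-in consistent with that usage would be (an assumption, not the repo's actual helper):

// hasVIP reports whether the ingress status already carries a load-balancer IP,
// i.e. whether a VIP has been provisioned for it.
func hasVIP(ing *v1beta1.Ingress) bool {
	for _, lbIng := range ing.Status.LoadBalancer.Ingress {
		if lbIng.IP != "" {
			return true
		}
	}
	return false
}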
+ ing = ing.DeepCopy() + ingClient := lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace) + namingScheme, err := lbc.defaultFrontendNamingScheme(ing) + if err != nil { + return err + } + finalizerKey, err := namer.FinalizerForNamingScheme(namingScheme) + if err != nil { + return err + } + if err := common.EnsureFinalizer(ing, ingClient, finalizerKey); err != nil { + klog.Errorf("Failed to ensure finalizer %s for ingress %s: %v", finalizerKey, ingKey, err) + return err + } + return nil +} + +// frontendGCAlgorithm returns the naming scheme using which frontend resources needs to be cleanedup. +// This also returns a boolean to specify if we need to delete frontend resources. +// GC path is +// If ingress does not exist : v1 frontends and all backends +// If ingress exists +// - Needs cleanup +// - If v1 naming scheme : v1 frontends and all backends +// - If v2 naming scheme : v2 frontends and all backends +// - Does not need cleanup +// - Finalizer enabled : all backends +// - Finalizer disabled : v1 frontends and all backends +func frontendGCAlgorithm(ingExists bool, ing *v1beta1.Ingress) utils.FrontendGCAlgorithm { + // If ingress does not exist, that means its pre-finalizer era. + // Run GC via v1 naming scheme. + if !ingExists { + return utils.CleanupV1FrontendResources + } + // Determine if we do not need to delete current ingress. + if !utils.NeedsCleanup(ing) { + // GC backends only if current ingress does not need cleanup and finalizers is enabled. + if flags.F.FinalizerAdd { + return utils.NoCleanUpNeeded + } + return utils.CleanupV1FrontendResources + } + namingScheme := namer.FrontendNamingScheme(ing) + switch namingScheme { + case namer.V2NamingScheme: + return utils.CleanupV2FrontendResources + case namer.V1NamingScheme: + return utils.CleanupV1FrontendResources + default: + klog.Errorf("Unexpected naming scheme %v", namingScheme) + return utils.NoCleanUpNeeded + } +} diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index e8ec510ed6..ba5b34cb23 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -19,12 +19,14 @@ package controller import ( "fmt" "reflect" + "sort" "strings" "testing" "time" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/google/go-cmp/cmp" + "google.golang.org/api/compute/v1" api_v1 "k8s.io/api/core/v1" "k8s.io/api/networking/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,6 +36,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/ingress-gce/pkg/annotations" backendconfigclient "k8s.io/ingress-gce/pkg/backendconfig/client/clientset/versioned/fake" + "k8s.io/ingress-gce/pkg/common/operator" "k8s.io/ingress-gce/pkg/context" "k8s.io/ingress-gce/pkg/events" "k8s.io/ingress-gce/pkg/flags" @@ -52,19 +55,6 @@ var ( clusterUID = "aaaaa" ) -// saveFinalizerFlags captures current value of finalizer flags and -// restore them after a test is finished. -type saveFinalizerFlags struct{ add, remove bool } - -func (s *saveFinalizerFlags) save() { - s.add = flags.F.FinalizerAdd - s.remove = flags.F.FinalizerRemove -} -func (s *saveFinalizerFlags) reset() { - flags.F.FinalizerAdd = s.add - flags.F.FinalizerRemove = s.remove -} - // newLoadBalancerController create a loadbalancer controller. 
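The local saveFinalizerFlags helper deleted above gives way to a shared test.NewFlagSaver used throughout these tests. Its implementation is not part of this diff; judging only from the Save/Reset call sites (a flag key plus a pointer to the bool flag), a minimal version might look like this, with the key constants assumed rather than taken from the repo:

// Flag identifies a stubbed global flag.
type Flag string

// Assumed key names; the real constants live in pkg/test.
const (
	FinalizerAddFlag          Flag = "finalizer-add"
	FinalizerRemoveFlag       Flag = "finalizer-remove"
	EnableV2FrontendNamerFlag Flag = "enable-v2-frontend-namer"
)

// FlagSaver snapshots bool flags so tests that mutate flags.F can restore them.
type FlagSaver struct{ saved map[Flag]bool }

func NewFlagSaver() FlagSaver { return FlagSaver{saved: make(map[Flag]bool)} }

// Save records the current value of the flag under the given key.
func (s FlagSaver) Save(key Flag, flag *bool) { s.saved[key] = *flag }

// Reset restores the previously saved value, if one was recorded.
func (s FlagSaver) Reset(key Flag, flag *bool) {
	if v, ok := s.saved[key]; ok {
		*flag = v
	}
}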
func newLoadBalancerController() *LoadBalancerController { kubeClient := fake.NewSimpleClientset() @@ -82,12 +72,11 @@ func newLoadBalancerController() *LoadBalancerController { HealthCheckPath: "/", DefaultBackendHealthCheckPath: "/healthz", } - - ctx := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, fakeGCE, namer, ctxConfig) + ctx := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, fakeGCE, namer, "" /*kubeSystemUID*/, ctxConfig) lbc := NewLoadBalancerController(ctx, stopCh) // TODO(rramkumar): Fix this so we don't have to override with our fake lbc.instancePool = instances.NewNodePool(instances.NewFakeInstanceGroups(sets.NewString(), namer), namer) - lbc.l7Pool = loadbalancers.NewLoadBalancerPool(fakeGCE, namer, events.RecorderProducerMock{}, namer_util.NewFrontendNamerFactory(namer)) + lbc.l7Pool = loadbalancers.NewLoadBalancerPool(fakeGCE, namer, events.RecorderProducerMock{}, namer_util.NewFrontendNamerFactory(namer, "")) lbc.instancePool.Init(&instances.FakeZoneLister{Zones: []string{"zone-a"}}) lbc.hasSynced = func() bool { return true } @@ -139,11 +128,15 @@ func setDeletionTimestamp(lbc *LoadBalancerController, ing *v1beta1.Ingress) { func deleteIngress(lbc *LoadBalancerController, ing *v1beta1.Ingress) { if len(ing.GetFinalizers()) == 0 { - lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, &meta_v1.DeleteOptions{}) - lbc.ctx.IngressInformer.GetIndexer().Delete(ing) + deleteIngressWithFinalizer(lbc, ing) } } +func deleteIngressWithFinalizer(lbc *LoadBalancerController, ing *v1beta1.Ingress) { + lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, &meta_v1.DeleteOptions{}) + lbc.ctx.IngressInformer.GetIndexer().Delete(ing) +} + // getKey returns the key for an ingress. func getKey(ing *v1beta1.Ingress, t *testing.T) string { key, err := common.KeyFunc(ing) @@ -188,9 +181,11 @@ func TestIngressSyncError(t *testing.T) { // Ingresses that need to be deleted, and keep the ones that don't, depending // on whether Finalizer Adds and/or Removes are enabled. func TestIngressCreateDeleteFinalizer(t *testing.T) { - var flagSaver saveFinalizerFlags - flagSaver.save() - defer flagSaver.reset() + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + defer flagSaver.Reset(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + flagSaver.Save(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + defer flagSaver.Reset(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) testCases := []struct { enableFinalizerAdd bool enableFinalizerRemove bool @@ -309,9 +304,11 @@ func TestIngressCreateDeleteFinalizer(t *testing.T) { // a good ingress config status is updated and LB is deleted after class change. // Note: This test cannot be run in parallel as it stubs global flags. func TestIngressClassChangeWithFinalizer(t *testing.T) { - var flagSaver saveFinalizerFlags - flagSaver.save() - defer flagSaver.reset() + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + defer flagSaver.Reset(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + flagSaver.Save(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + defer flagSaver.Reset(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) flags.F.FinalizerAdd = true flags.F.FinalizerRemove = true lbc := newLoadBalancerController() @@ -364,9 +361,11 @@ func TestIngressClassChangeWithFinalizer(t *testing.T) { // multiple ingresses with shared resources are added or deleted. 
// Note: This test cannot be run in parallel as it stubs global flags. func TestIngressesWithSharedResourcesWithFinalizer(t *testing.T) { - var flagSaver saveFinalizerFlags - flagSaver.save() - defer flagSaver.reset() + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + defer flagSaver.Reset(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + flagSaver.Save(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + defer flagSaver.Reset(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) flags.F.FinalizerAdd = true flags.F.FinalizerRemove = true lbc := newLoadBalancerController() @@ -433,9 +432,9 @@ func TestIngressesWithSharedResourcesWithFinalizer(t *testing.T) { // to existing ingressesToCleanup when lbc is upgraded to enable finalizer. // Note: This test cannot be run in parallel as it stubs global flags. func TestEnableFinalizer(t *testing.T) { - var flagSaver saveFinalizerFlags - flagSaver.save() - defer flagSaver.reset() + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + defer flagSaver.Reset(test.FinalizerAddFlag, &flags.F.FinalizerAdd) lbc := newLoadBalancerController() svc := test.NewService(types.NamespacedName{Name: "my-service", Namespace: "namespace1"}, api_v1.ServiceSpec{ Type: api_v1.ServiceTypeNodePort, @@ -670,3 +669,309 @@ func TestToRuntimeInfoCerts(t *testing.T) { t.Errorf("lbInfo.TLS = %v, want %v", lbInfo.TLS, tlsCerts) } } + +// TestIngressTagging asserts that appropriate finalizer that defines frontend naming scheme, +// is added to ingress being synced. +func TestIngressTagging(t *testing.T) { + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + defer flagSaver.Reset(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + flagSaver.Save(test.EnableV2FrontendNamerFlag, &flags.F.EnableV2FrontendNamer) + defer flagSaver.Reset(test.EnableV2FrontendNamerFlag, &flags.F.EnableV2FrontendNamer) + testCases := []struct { + enableFinalizerAdd bool + enableV2Namer bool + vipExists bool + urlMapExists bool + expectedFinalizer string + }{ + {enableFinalizerAdd: false, expectedFinalizer: ""}, + {enableFinalizerAdd: true, enableV2Namer: false, expectedFinalizer: common.FinalizerKey}, + {enableFinalizerAdd: true, enableV2Namer: true, vipExists: false, expectedFinalizer: common.FinalizerKeyV2}, + {enableFinalizerAdd: true, enableV2Namer: true, vipExists: true, urlMapExists: false, expectedFinalizer: common.FinalizerKeyV2}, + {enableFinalizerAdd: true, enableV2Namer: true, vipExists: true, urlMapExists: true, expectedFinalizer: common.FinalizerKey}, + } + + for idx, tc := range testCases { + var desc string + switch idx { + case 0: + desc = fmt.Sprintf("enableFinalizerAdd %t", tc.enableFinalizerAdd) + case 1: + desc = fmt.Sprintf("enableFinalizerAdd %t enableV2Namer %t ", tc.enableFinalizerAdd, tc.enableV2Namer) + case 2: + desc = fmt.Sprintf("enableFinalizerAdd %t enableV2Namer %t vipExists %t", tc.enableFinalizerAdd, tc.enableV2Namer, tc.vipExists) + default: + desc = fmt.Sprintf("enableFinalizerAdd %t enableV2Namer %t vipExists %t urlMapExists %t", tc.enableFinalizerAdd, tc.enableV2Namer, tc.vipExists, tc.urlMapExists) + } + t.Run(desc, func(t *testing.T) { + flags.F.FinalizerAdd = tc.enableFinalizerAdd + flags.F.EnableV2FrontendNamer = tc.enableV2Namer + + lbc := newLoadBalancerController() + svc := test.NewService(types.NamespacedName{Name: "my-service", Namespace: "default"}, api_v1.ServiceSpec{ + Type: api_v1.ServiceTypeNodePort, + Ports: 
[]api_v1.ServicePort{{Port: 80}}, + }) + addService(lbc, svc) + + defaultBackend := backend("my-service", intstr.FromInt(80)) + ing := test.NewIngress(types.NamespacedName{Name: "my-ingress", Namespace: "default"}, + v1beta1.IngressSpec{ + Backend: &defaultBackend, + }) + if tc.vipExists { + ing.Status.LoadBalancer.Ingress = []api_v1.LoadBalancerIngress{{IP: "0.0.0.0"}} + } + addIngress(lbc, ing) + // Create URL map if enabled. + if tc.urlMapExists { + lbName := lbc.ctx.ClusterNamer.LoadBalancer(common.IngressKeyFunc(ing)) + lbc.ctx.Cloud.CreateURLMap(&compute.UrlMap{Name: lbc.ctx.ClusterNamer.UrlMap(lbName)}) + } + + ingStoreKey := getKey(ing, t) + if err := lbc.sync(ingStoreKey); err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + + updatedIng, err := lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace).Get(ing.Name, meta_v1.GetOptions{}) + if err != nil { + t.Fatalf("Get(%v) = %v, want nil", ingStoreKey, err) + } + ingFinalizers := updatedIng.GetFinalizers() + // Verify that no finalizer is added when finalizer flag is disabled. + if !flags.F.FinalizerAdd { + if l := len(ingFinalizers); l != 0 { + t.Fatalf("len(updatedIng.GetFinalizers()) = %d, want 0; updatedIng = %+v", l, updatedIng) + } + return + } + // Verify that appropriate finalizer is added + if l := len(ingFinalizers); l != 1 { + t.Fatalf("len(updatedIng.GetFinalizers()) = %d, want 1; updatedIng = %+v", l, updatedIng) + } + if diff := cmp.Diff(tc.expectedFinalizer, ingFinalizers[0]); diff != "" { + t.Fatalf("Got diff for Finalizer (-want +got):\n%s", diff) + } + }) + } +} + +// TestGC asserts that GC workflow deletes multiple ingresses on a single sync +// if delete events for those ingresses are lost. +// Note that this workflow is valid only when finalizer is disabled. +func TestGCMultiple(t *testing.T) { + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + defer flagSaver.Reset(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + flags.F.FinalizerRemove = true + const namespace = "namespace" + + ings := []string{"v1-ing1", "v1-ing2", "v1-ing3", "v1-ing4"} + expectedIngresses := []string{"namespace/v1-ing3", "namespace/v1-ing4"} + // expected ingresses after creation. + expectedIngressKeys := make([]string, 0) + for _, ingName := range ings { + expectedIngressKeys = append(expectedIngressKeys, fmt.Sprintf("%s/%s", namespace, ingName)) + } + lbc := newLoadBalancerController() + // Create ingresses and run sync on them. + var updatedIngs []*v1beta1.Ingress + for _, ing := range ings { + updatedIngs = append(updatedIngs, ensureIngress(t, lbc, namespace, ing, namer_util.V1NamingScheme)) + } + + allIngressKeys := lbc.ctx.Ingresses().ListKeys() + sort.Strings(allIngressKeys) + if diff := cmp.Diff(expectedIngressKeys, allIngressKeys); diff != "" { + t.Fatalf("Got diff for ingresses (-want +got):\n%s", diff) + } + + // delete multiple ingresses during a single sync. + timestamp := meta_v1.NewTime(time.Now()) + // Add deletion timestamp to mock ingress deletions. + // Delete half of the ingresses. + for i := 0; i < len(updatedIngs)/2; i++ { + updatedIngs[i].SetDeletionTimestamp(×tamp) + updateIngress(lbc, updatedIngs[i]) + } + // Sync on the last ingress. + ingStoreKey := getKey(updatedIngs[len(updatedIngs)-1], t) + if err := lbc.sync(ingStoreKey); err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + + // Update ingress store with any potential changes made by controller. 
+ // This step is needed only because we use a mock controller setup. + for _, ing := range updatedIngs { + lbc.ctx.IngressInformer.GetIndexer().Update(getUpdatedIngress(t, lbc, ing)) + } + + // Assert that controller returns same ingresses as expected ingresses. + // Controller sync removes finalizers from all deletion candidates. + // Filter ingresses with finalizer for un-deleted ingresses. + allIngresses := operator.Ingresses(lbc.ctx.Ingresses().List()).Filter(func(ing *v1beta1.Ingress) bool { + return common.HasFinalizer(ing.ObjectMeta) + }).AsList() + allIngressKeys = common.ToIngressKeys(allIngresses) + sort.Strings(allIngressKeys) + if diff := cmp.Diff(expectedIngresses, allIngressKeys); diff != "" { + t.Fatalf("Got diff for Ingresses after delete (-want +got):\n%s", diff) + } +} + +// TestGC asserts that GC workflow runs as expected during a controller sync. +func TestGC(t *testing.T) { + flagSaver := test.NewFlagSaver() + flagSaver.Save(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + defer flagSaver.Reset(test.FinalizerRemoveFlag, &flags.F.FinalizerRemove) + flagSaver.Save(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + defer flagSaver.Reset(test.FinalizerAddFlag, &flags.F.FinalizerAdd) + flagSaver.Save(test.EnableV2FrontendNamerFlag, &flags.F.EnableV2FrontendNamer) + defer flagSaver.Reset(test.EnableV2FrontendNamerFlag, &flags.F.EnableV2FrontendNamer) + flags.F.FinalizerRemove = true + flags.F.FinalizerAdd = true + flags.F.EnableV2FrontendNamer = true + const namespace = "namespace" + for _, tc := range []struct { + desc string + v1IngressNames []string + v2IngressNames []string + expectedIngresses []string + }{ + { + desc: "empty", + v1IngressNames: []string{}, + v2IngressNames: []string{}, + expectedIngresses: []string{}, + }, + { + desc: "v1 ingresses only", + v1IngressNames: []string{"v1-ing1", "v1-ing2", "v1-ing3", "v1-ing4"}, + v2IngressNames: []string{}, + expectedIngresses: []string{"namespace/v1-ing3", "namespace/v1-ing4"}, + }, + { + desc: "v2 ingresses only", + v1IngressNames: []string{}, + v2IngressNames: []string{"v2-ing1", "v2-ing2", "v2-ing3", "v2-ing4"}, + expectedIngresses: []string{"namespace/v2-ing3", "namespace/v2-ing4"}, + }, + { + desc: "both ingresses", + v1IngressNames: []string{"v1-ing1", "v1-ing2", "v1-ing3", "v1-ing4"}, + v2IngressNames: []string{"v2-ing1", "v2-ing2", "v2-ing3", "v2-ing4"}, + expectedIngresses: []string{"namespace/v1-ing3", "namespace/v1-ing4", "namespace/v2-ing3", "namespace/v2-ing4"}, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + // expected ingresses after creation. + expectedIngressKeys := make([]string, 0) + for _, ingName := range tc.v1IngressNames { + expectedIngressKeys = append(expectedIngressKeys, fmt.Sprintf("%s/%s", namespace, ingName)) + } + for _, ingName := range tc.v2IngressNames { + expectedIngressKeys = append(expectedIngressKeys, fmt.Sprintf("%s/%s", namespace, ingName)) + } + lbc := newLoadBalancerController() + // Create ingresses and run sync on them. 
+ var v1Ingresses, v2Ingresses []*v1beta1.Ingress + for _, ing := range tc.v1IngressNames { + v1Ingresses = append(v1Ingresses, ensureIngress(t, lbc, namespace, ing, namer_util.V1NamingScheme)) + } + for _, ing := range tc.v2IngressNames { + v2Ingresses = append(v2Ingresses, ensureIngress(t, lbc, namespace, ing, namer_util.V2NamingScheme)) + } + + allIngressKeys := lbc.ctx.Ingresses().ListKeys() + sort.Strings(allIngressKeys) + if diff := cmp.Diff(expectedIngressKeys, allIngressKeys); diff != "" { + t.Fatalf("Got diff for ingresses (-want +got):\n%s", diff) + } + + // Delete half of the v1 ingresses. + for i := 0; i < len(v1Ingresses)/2; i++ { + // Add deletion stamp to indicate that ingress is deleted. + timestamp := meta_v1.NewTime(time.Now()) + v1Ingresses[i].SetDeletionTimestamp(×tamp) + updateIngress(lbc, v1Ingresses[i]) + ingStoreKey := getKey(v1Ingresses[i], t) + if err := lbc.sync(ingStoreKey); err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + } + + // Delete half of the v2 ingresses. + for i := 0; i < len(v2Ingresses)/2; i++ { + // Add deletion stamp to indicate that ingress is deleted. + timestamp := meta_v1.NewTime(time.Now()) + v2Ingresses[i].SetDeletionTimestamp(×tamp) + updateIngress(lbc, v2Ingresses[i]) + ingStoreKey := getKey(v2Ingresses[i], t) + if err := lbc.sync(ingStoreKey); err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + } + + // Update ingress store with any potential changes made by controller. + // This step is needed only because we use a mock controller setup. + for _, ing := range v1Ingresses { + lbc.ctx.IngressInformer.GetIndexer().Update(getUpdatedIngress(t, lbc, ing)) + } + for _, ing := range v2Ingresses { + lbc.ctx.IngressInformer.GetIndexer().Update(getUpdatedIngress(t, lbc, ing)) + } + + // Assert that controller returns same ingresses as expected ingresses. + // Controller sync removes finalizers from all deletion candidates. + // Filter ingresses with finalizer for un-deleted ingresses. + allIngresses := operator.Ingresses(lbc.ctx.Ingresses().List()).Filter(func(ing *v1beta1.Ingress) bool { + return common.HasFinalizer(ing.ObjectMeta) + }).AsList() + allIngressKeys = common.ToIngressKeys(allIngresses) + sort.Strings(allIngressKeys) + if diff := cmp.Diff(tc.expectedIngresses, allIngressKeys); diff != "" { + t.Fatalf("Got diff for Ingresses after delete (-want +got):\n%s", diff) + } + }) + } +} + +// ensureIngress creates an ingress and syncs it with ingress controller. +// This returns updated ingress after sync. 
+func ensureIngress(t *testing.T, lbc *LoadBalancerController, namespace, name string, scheme namer_util.Scheme) *v1beta1.Ingress { + serviceName := fmt.Sprintf("service-for-%s", name) + svc := test.NewService(types.NamespacedName{Name: serviceName, Namespace: namespace}, api_v1.ServiceSpec{ + Type: api_v1.ServiceTypeNodePort, + Ports: []api_v1.ServicePort{{Port: 80}}, + }) + addService(lbc, svc) + + defaultBackend := backend(serviceName, intstr.FromInt(80)) + ing := test.NewIngress(types.NamespacedName{Name: name, Namespace: namespace}, + v1beta1.IngressSpec{ + Backend: &defaultBackend, + }) + if scheme == namer_util.V1NamingScheme { + ing.ObjectMeta.Finalizers = []string{common.FinalizerKey} + } else if scheme == namer_util.V2NamingScheme { + ing.ObjectMeta.Finalizers = []string{common.FinalizerKeyV2} + } + addIngress(lbc, ing) + + ingStoreKey := getKey(ing, t) + if err := lbc.sync(ingStoreKey); err != nil { + t.Fatalf("lbc.sync(%v) = %v, want nil", ingStoreKey, err) + } + + return getUpdatedIngress(t, lbc, ing) +} + +func getUpdatedIngress(t *testing.T, lbc *LoadBalancerController, ing *v1beta1.Ingress) *v1beta1.Ingress { + updatedIng, err := lbc.ctx.KubeClient.NetworkingV1beta1().Ingresses(ing.Namespace).Get(ing.Name, meta_v1.GetOptions{}) + if err != nil { + t.Fatalf("lbc.ctx.KubeClient...Get(%v) = %v, want nil", getKey(ing, t), err) + } + return updatedIng +} diff --git a/pkg/controller/translator/translator_test.go b/pkg/controller/translator/translator_test.go index c523e30e21..c0310400b4 100644 --- a/pkg/controller/translator/translator_test.go +++ b/pkg/controller/translator/translator_test.go @@ -65,7 +65,7 @@ func fakeTranslator() *Translator { HealthCheckPath: "/", DefaultBackendHealthCheckPath: "/healthz", } - ctx := context.NewControllerContext(nil, client, backendConfigClient, nil, nil, defaultNamer, ctxConfig) + ctx := context.NewControllerContext(nil, client, backendConfigClient, nil, nil, defaultNamer, "" /*kubeSystemUID*/, ctxConfig) gce := &Translator{ ctx: ctx, } diff --git a/pkg/firewalls/controller_test.go b/pkg/firewalls/controller_test.go index 6e7c46acfa..f2c0524261 100644 --- a/pkg/firewalls/controller_test.go +++ b/pkg/firewalls/controller_test.go @@ -46,7 +46,7 @@ func newFirewallController() *FirewallController { DefaultBackendSvcPort: test.DefaultBeSvcPort, } - ctx := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, fakeGCE, defaultNamer, ctxConfig) + ctx := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, fakeGCE, defaultNamer, "" /*kubeSystemUID*/, ctxConfig) fwc := NewFirewallController(ctx, []string{"30000-32767"}) fwc.hasSynced = func() bool { return true } diff --git a/pkg/flags/flags.go b/pkg/flags/flags.go index 6c33fe8629..ab3b007f89 100644 --- a/pkg/flags/flags.go +++ b/pkg/flags/flags.go @@ -89,6 +89,7 @@ var ( ASMConfigMapBasedConfigCMName string EnableNonGCPMode bool EnableDeleteUnusedFrontends bool + EnableV2FrontendNamer bool LeaderElection LeaderElectionConfiguration }{} @@ -208,6 +209,7 @@ L7 load balancing. CSV values accepted. 
Example: -node-port-ranges=80,8080,400-5 flag.StringVar(&F.ASMConfigMapBasedConfigCMName, "asm-configmap-based-config-cmname", "ingress-controller-asm-cm-config", "ASM Configmap based config: configmap name") flag.BoolVar(&F.EnableNonGCPMode, "enable-non-gcp-mode", false, "Set to true when running on a non-GCP cluster.") flag.BoolVar(&F.EnableDeleteUnusedFrontends, "enable-delete-unused-frontends", false, "Enable deleting unused gce frontend resources.") + flag.BoolVar(&F.EnableV2FrontendNamer, "enable-v2-frontend-namer", false, "Enable v2 ingress frontend naming policy.") } type RateLimitSpecs struct { diff --git a/pkg/loadbalancers/interfaces.go b/pkg/loadbalancers/interfaces.go index 6a3b0c753b..418d342b8c 100644 --- a/pkg/loadbalancers/interfaces.go +++ b/pkg/loadbalancers/interfaces.go @@ -17,17 +17,20 @@ limitations under the License. package loadbalancers import ( - "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" - "k8s.io/ingress-gce/pkg/composite" - "k8s.io/ingress-gce/pkg/loadbalancers/features" + "k8s.io/api/networking/v1beta1" ) // LoadBalancerPool is an interface to manage the cloud resources associated // with a gce loadbalancer. type LoadBalancerPool interface { + // Ensure ensures a loadbalancer and its resources given the RuntimeInfo. Ensure(ri *L7RuntimeInfo) (*L7, error) - Delete(name string, versions *features.ResourceVersions, scope meta.KeyType) error - GC(names []string) error - Shutdown() error - List(key *meta.Key, version meta.Version) ([]*composite.UrlMap, error) + // GCv2 garbage collects loadbalancer associated with given ingress using v2 naming scheme. + GCv2(ing *v1beta1.Ingress) error + // GCv1 garbage collects loadbalancers not in the input list using v1 naming scheme. + GCv1(names []string) error + // Shutdown deletes all loadbalancers for given list of ingresses. + Shutdown(ings []*v1beta1.Ingress) error + // HasUrlMap returns true if an URL map exists in GCE for given ingress. + HasUrlMap(ing *v1beta1.Ingress) (bool, error) } diff --git a/pkg/loadbalancers/l7s.go b/pkg/loadbalancers/l7s.go index a8a8ee05be..6984a7956b 100644 --- a/pkg/loadbalancers/l7s.go +++ b/pkg/loadbalancers/l7s.go @@ -18,13 +18,18 @@ package loadbalancers import ( "fmt" + "net/http" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "k8s.io/api/networking/v1beta1" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/ingress-gce/pkg/common/operator" "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/events" "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/loadbalancers/features" + "k8s.io/ingress-gce/pkg/utils" + "k8s.io/ingress-gce/pkg/utils/common" namer_util "k8s.io/ingress-gce/pkg/utils/namer" "k8s.io/klog" "k8s.io/legacy-cloud-providers/gce" @@ -52,7 +57,7 @@ func NewLoadBalancerPool(cloud *gce.Cloud, v1NamerHelper namer_util.V1FrontendNa } } -// Ensure ensures a loadbalancer and its resources given the RuntimeInfo +// Ensure implements LoadBalancerPool. func (l *L7s) Ensure(ri *L7RuntimeInfo) (*L7, error) { lb := &L7{ runtimeInfo: ri, @@ -69,8 +74,8 @@ func (l *L7s) Ensure(ri *L7RuntimeInfo) (*L7, error) { return lb, nil } -// Delete deletes a load balancer by name. -func (l *L7s) Delete(name string, versions *features.ResourceVersions, scope meta.KeyType) error { +// delete deletes a loadbalancer by name. 
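The slimmed-down LoadBalancerPool interface above also makes the pool easy to stub out in controller tests. A minimal in-package fake that records which GC path gets exercised might look like this (illustrative sketch, not one of the repo's existing fakes):

// fakePool stubs LoadBalancerPool and records GC calls for assertions.
type fakePool struct {
	gcV1Calls [][]string
	gcV2Calls []*v1beta1.Ingress
}

func (f *fakePool) Ensure(ri *L7RuntimeInfo) (*L7, error) { return nil, nil }

func (f *fakePool) GCv2(ing *v1beta1.Ingress) error {
	f.gcV2Calls = append(f.gcV2Calls, ing)
	return nil
}

func (f *fakePool) GCv1(names []string) error {
	f.gcV1Calls = append(f.gcV1Calls, names)
	return nil
}

func (f *fakePool) Shutdown(ings []*v1beta1.Ingress) error { return nil }

func (f *fakePool) HasUrlMap(ing *v1beta1.Ingress) (bool, error) { return false, nil }

Keeping the new methods at the ingress level (GCv2, Shutdown, and HasUrlMap all take a *v1beta1.Ingress) is what lets the pool pick the right frontend namer per ingress instead of being handed pre-computed v1 names.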
+func (l *L7s) delete(name string, versions *features.ResourceVersions, scope meta.KeyType) error { lb := &L7{ runtimeInfo: &L7RuntimeInfo{}, cloud: l.cloud, @@ -86,8 +91,25 @@ func (l *L7s) Delete(name string, versions *features.ResourceVersions, scope met return nil } -// List returns a list of urlMaps (the top level LB resource) that belong to the cluster -func (l *L7s) List(key *meta.Key, version meta.Version) ([]*composite.UrlMap, error) { +// v2Delete deletes a loadbalancer frontend resources by ingress using v2 naming scheme. +func (l *L7s) v2Delete(ing *v1beta1.Ingress, versions *features.ResourceVersions, scope meta.KeyType) error { + lb := &L7{ + runtimeInfo: &L7RuntimeInfo{Ingress: ing}, + cloud: l.cloud, + namer: l.namerFactory.Namer(ing), + scope: scope, + } + + klog.V(3).Infof("Deleting lb (using v2 naming scheme) %v", lb) + + if err := lb.Cleanup(versions); err != nil { + return err + } + return nil +} + +// list returns a list of urlMaps (the top level LB resource) that belong to the cluster. +func (l *L7s) list(key *meta.Key, version meta.Version) ([]*composite.UrlMap, error) { var result []*composite.UrlMap urlMaps, err := composite.ListUrlMaps(l.cloud, key, version) if err != nil { @@ -103,10 +125,21 @@ func (l *L7s) List(key *meta.Key, version meta.Version) ([]*composite.UrlMap, er return result, nil } -// GC garbage collects loadbalancers not in the input list. +// GCv2 implements LoadBalancerPool. +func (l *L7s) GCv2(ing *v1beta1.Ingress) error { + ingKey := common.NamespacedName(ing) + klog.V(2).Infof("GC(%v)", ingKey) + if err := l.v2Delete(ing, features.VersionsFromIngress(ing), features.ScopeFromIngress(ing)); err != nil { + return err + } + klog.V(2).Infof("GC(%v) ok", ingKey) + return nil +} + +// GCv1 implements LoadBalancerPool. // TODO(shance): Update to handle regional and global LB with same name -func (l *L7s) GC(names []string) error { - klog.V(2).Infof("GC(%v)", names) +func (l *L7s) GCv1(names []string) error { + klog.V(2).Infof("GCv1(%v)", names) knownLoadBalancers := sets.NewString() for _, n := range names { @@ -119,7 +152,7 @@ func (l *L7s) GC(names []string) error { if err != nil { return fmt.Errorf("error getting regional key: %v", err) } - urlMaps, err := l.List(key, features.L7ILBVersions().UrlMap) + urlMaps, err := l.list(key, features.L7ILBVersions().UrlMap) if err != nil { return fmt.Errorf("error listing regional LBs: %v", err) } @@ -130,7 +163,7 @@ func (l *L7s) GC(names []string) error { } // TODO(shance): fix list taking a key - urlMaps, err := l.List(meta.GlobalKey(""), meta.VersionGA) + urlMaps, err := l.list(meta.GlobalKey(""), meta.VersionGA) if err != nil { return fmt.Errorf("error listing global LBs: %v", err) } @@ -142,7 +175,7 @@ func (l *L7s) GC(names []string) error { return nil } -// gc is a helper for GC +// gc is a helper for GCv1. // TODO(shance): get versions from description func (l *L7s) gc(urlMaps []*composite.UrlMap, knownLoadBalancers sets.String, versions *features.ResourceVersions) []error { var errors []error @@ -164,18 +197,48 @@ func (l *L7s) gc(urlMaps []*composite.UrlMap, knownLoadBalancers sets.String, ve } klog.V(2).Infof("GCing loadbalancer %v", l7Name) - if err := l.Delete(l7Name, versions, scope); err != nil { + if err := l.delete(l7Name, versions, scope); err != nil { errors = append(errors, fmt.Errorf("error deleting loadbalancer %q", l7Name)) } } return nil } -// Shutdown logs whether or not the pool is empty. 
-func (l *L7s) Shutdown() error { - if err := l.GC([]string{}); err != nil { - return err +// Shutdown implements LoadBalancerPool. +func (l *L7s) Shutdown(ings []*v1beta1.Ingress) error { + // Delete ingresses that use v1 naming scheme. + if err := l.GCv1([]string{}); err != nil { + return fmt.Errorf("error deleting load-balancers for v1 naming policy: %v", err) + } + // Delete ingresses that use v2 naming policy. + var errs []error + v2Ings := operator.Ingresses(ings).Filter(func(ing *v1beta1.Ingress) bool { + return namer_util.FrontendNamingScheme(ing) == namer_util.V2NamingScheme + }).AsList() + for _, ing := range v2Ings { + if err := l.GCv2(ing); err != nil { + errs = append(errs, err) + } + } + if errs != nil { + return fmt.Errorf("error deleting load-balancers for v2 naming policy: %v", utils.JoinErrs(errs)) } klog.V(2).Infof("Loadbalancer pool shutdown.") return nil } + +// HasUrlMap implements LoadBalancerPool. +func (l *L7s) HasUrlMap(ing *v1beta1.Ingress) (bool, error) { + namer := l.namerFactory.Namer(ing) + key, err := composite.CreateKey(l.cloud, namer.UrlMap(), features.ScopeFromIngress(ing)) + if err != nil { + return false, err + } + if _, err := composite.GetUrlMap(l.cloud, key, features.VersionsFromIngress(ing).UrlMap); err != nil { + if utils.IsHTTPErrorCode(err, http.StatusNotFound) { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/pkg/loadbalancers/l7s_test.go b/pkg/loadbalancers/l7s_test.go index 54e78c23be..9080d5221d 100644 --- a/pkg/loadbalancers/l7s_test.go +++ b/pkg/loadbalancers/l7s_test.go @@ -25,16 +25,20 @@ import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" + "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/context" "k8s.io/ingress-gce/pkg/loadbalancers/features" + "k8s.io/ingress-gce/pkg/utils/common" namer_util "k8s.io/ingress-gce/pkg/utils/namer" "k8s.io/legacy-cloud-providers/gce" ) const ( testClusterName = "0123456789abcedf" + kubeSystemUID = "ksuid123" // resourceLeakLimit is the limit when ingress namespace and name are too long and ingress // GC will leak the LB resources because the cluster uid got truncated. 
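HasUrlMap above treats a 404 from the compute API as "no URL map" and propagates every other error. utils.IsHTTPErrorCode itself is not shown in this diff; against the googleapi error type it reduces to roughly the following (a sketch that ignores error wrapping):

import (
	"net/http"

	"google.golang.org/api/googleapi"
)

// isHTTPErrorCode reports whether err is a GCE API error with the given HTTP status.
func isHTTPErrorCode(err error, code int) bool {
	apiErr, ok := err.(*googleapi.Error)
	return ok && apiErr.Code == code
}

// notFound is the case HasUrlMap converts into (false, nil).
func notFound(err error) bool {
	return isHTTPErrorCode(err, http.StatusNotFound)
}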
@@ -164,7 +168,7 @@ func TestGC(t *testing.T) { } otherNamer := namer_util.NewNamer("clusteruid", "fw1") - otherFeNamerFactory := namer_util.NewFrontendNamerFactory(otherNamer) + otherFeNamerFactory := namer_util.NewFrontendNamerFactory(otherNamer, "") otherKeys := []string{ "a/a", "namespace/name", @@ -185,7 +189,7 @@ func TestGC(t *testing.T) { createFakeLoadbalancer(cloud, namer, versions, defaultScope) } - err := l7sPool.GC(tc.ingressLBs) + err := l7sPool.GCv1(tc.ingressLBs) if err != nil { t.Errorf("For case %q, do not expect err: %v", tc.desc, err) } @@ -247,7 +251,7 @@ func TestDoNotGCWantedLB(t *testing.T) { for _, tc := range testCases { namer := l7sPool.namerFactory.NamerForLbName(l7sPool.v1NamerHelper.LoadBalancer(tc.key)) createFakeLoadbalancer(l7sPool.cloud, namer, versions, defaultScope) - err := l7sPool.GC([]string{tc.key}) + err := l7sPool.GCv1([]string{tc.key}) if err != nil { t.Errorf("For case %q, do not expect err: %v", tc.desc, err) } @@ -283,7 +287,7 @@ func TestGCToLeakLB(t *testing.T) { for _, tc := range testCases { namer := l7sPool.namerFactory.NamerForLbName(l7sPool.v1NamerHelper.LoadBalancer(tc.key)) createFakeLoadbalancer(l7sPool.cloud, namer, versions, defaultScope) - err := l7sPool.GC([]string{}) + err := l7sPool.GCv1([]string{}) if err != nil { t.Errorf("For case %q, do not expect err: %v", tc.desc, err) } @@ -301,11 +305,327 @@ func TestGCToLeakLB(t *testing.T) { } } +// TestV2GC asserts that GC workflow for v2 naming scheme deletes load balancer +// associated with given v2 ingress. This also checks that other v2 ingress +// associated LBs or v1 ingress associated LBs are not deleted. +func TestV2GC(t *testing.T) { + t.Parallel() + pool := newTestLoadBalancerPool() + l7sPool := pool.(*L7s) + cloud := l7sPool.cloud + feNamerFactory := l7sPool.namerFactory + testCases := []struct { + desc string + ingressToDelete *v1beta1.Ingress + addV1Ingresses bool + gcpLBs []*v1beta1.Ingress + expectedLBs []*v1beta1.Ingress + }{ + { + desc: "empty", + ingressToDelete: newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + addV1Ingresses: false, + gcpLBs: []*v1beta1.Ingress{}, + expectedLBs: []*v1beta1.Ingress{}, + }, + { + desc: "simple case v2 only", + ingressToDelete: newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:15], longName[:20], common.FinalizerKeyV2), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:15], longName[:20], common.FinalizerKeyV2), + }, + }, + { + desc: "simple case both v1 and v2", + ingressToDelete: newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:11], common.FinalizerKey), + newIngressWithFinalizer(longName[:20], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], 
common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:11], common.FinalizerKey), + newIngressWithFinalizer(longName[:20], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + }, + { + desc: "63 characters v2 only", + ingressToDelete: newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:19], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:19], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + }, + }, + { + desc: "63 characters both v1 and v2", + ingressToDelete: newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + }, + { + desc: "longNameSpace v2 only", + ingressToDelete: newIngressWithFinalizer(longName, longName[:1], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName[:1], common.FinalizerKeyV2), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + }, + }, + { + desc: "longNameSpace both v1 and v2", + ingressToDelete: newIngressWithFinalizer(longName, longName[:1], common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName[:1], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + expectedLBs: []*v1beta1.Ingress{ + 
newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + }, + { + desc: "longName v2 only", + ingressToDelete: newIngressWithFinalizer(longName[:1], longName, common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:1], longName, common.FinalizerKeyV2), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + }, + }, + { + desc: "longName both v1 and v2", + ingressToDelete: newIngressWithFinalizer(longName[:1], longName, common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:1], longName, common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + }, + { + desc: "longNameSpace and longName v2 only", + ingressToDelete: newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:18], longName[:18], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + }, + }, + { + desc: "longNameSpace and longName both v1 and v2", + ingressToDelete: newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + gcpLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + 
newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + expectedLBs: []*v1beta1.Ingress{ + newIngressWithFinalizer(longName[:10], longName[:15], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:30], longName[:30], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:18], longName[:19], common.FinalizerKey), + newIngressWithFinalizer(longName[:21], longName[:27], common.FinalizerKey), + newIngressWithFinalizer(longName, longName, common.FinalizerKey), + }, + }, + } + + // Add LBs owned by another cluster. + otherNamer := namer_util.NewNamer("clusteruid", "fw1") + otherFeNamerFactory := namer_util.NewFrontendNamerFactory(otherNamer, "ksuid234") + otherIngresses := []*v1beta1.Ingress{ + // ingresses with v1 naming scheme. + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:20], longName[:27], common.FinalizerKey), + // ingresses with v2 naming scheme. + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:15], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:16], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:17], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + } + versions := features.GAResourceVersions + + for _, ing := range otherIngresses { + createFakeLoadbalancer(cloud, otherFeNamerFactory.Namer(ing), versions, defaultScope) + } + + for _, tc := range testCases { + desc := fmt.Sprintf("%s namespaceLength %d nameLength %d", tc.desc, len(tc.ingressToDelete.Namespace), len(tc.ingressToDelete.Name)) + t.Run(desc, func(t *testing.T) { + for _, ing := range tc.gcpLBs { + createFakeLoadbalancer(cloud, feNamerFactory.Namer(ing), versions, defaultScope) + } + + err := l7sPool.GCv2(tc.ingressToDelete) + if err != nil { + t.Errorf("l7sPool.GC(%q) = %v, want nil for case %q", common.NamespacedName(tc.ingressToDelete), err, tc.desc) + } + + // Check if LBs associated with other ingresses are not deleted. + for _, ing := range otherIngresses { + if err := checkFakeLoadBalancer(cloud, otherFeNamerFactory.Namer(ing), versions, defaultScope, true); err != nil { + t.Errorf("checkFakeLoadBalancer(...) = %v, want nil for case %q and ingress %q", err, tc.desc, common.NamespacedName(ing)) + } + } + + // Check if the total number of url maps is as expected. + urlMaps, _ := l7sPool.cloud.ListURLMaps() + if ingCount := len(tc.expectedLBs) + len(otherIngresses); ingCount != len(urlMaps) { + t.Errorf("len(l7sPool.cloud.ListURLMaps()) = %d, want %d for case %q", len(urlMaps), ingCount, tc.desc) + } + + // Check if the load balancer associated with ingress to be deleted is actually GCed. + if err := checkFakeLoadBalancer(cloud, feNamerFactory.Namer(tc.ingressToDelete), versions, defaultScope, false); err != nil { + t.Errorf("checkFakeLoadBalancer(...) = %v, want nil for case %q and ingress %q", err, tc.desc, common.NamespacedName(tc.ingressToDelete)) + } + + // Check if all expected LBs exist. + for _, ing := range tc.expectedLBs { + feNamer := feNamerFactory.Namer(ing) + if err := checkFakeLoadBalancer(cloud, feNamer, versions, defaultScope, true); err != nil { + t.Errorf("checkFakeLoadBalancer(...) 
= %v, want nil for case %q and ingress %q", err, tc.desc, common.NamespacedName(ing)) + } + removeFakeLoadBalancer(cloud, feNamer, versions, defaultScope) + } + }) + } +} + +// TestDoNotLeakV2LB asserts that GC workflow for v2 naming scheme does not leak +// GCE resources for different namespaced names. +func TestDoNotLeakV2LB(t *testing.T) { + t.Parallel() + pool := newTestLoadBalancerPool() + l7sPool := pool.(*L7s) + cloud := l7sPool.cloud + feNamerFactory := l7sPool.namerFactory + + type testCase struct { + desc string + ing *v1beta1.Ingress + } + var testCases []testCase + + for i := 3; i <= len(longName)*2; i++ { + ing := newIngressWithFinalizer(longName[:i/2], longName[:(i-i/2)], common.FinalizerKeyV2) + testCases = append(testCases, testCase{fmt.Sprintf("IngressKeyLength: %d", i), ing}) + } + + // Add LBs owned by another cluster. + otherNamer := namer_util.NewNamer("clusteruid", "fw1") + otherFeNamerFactory := namer_util.NewFrontendNamerFactory(otherNamer, "ksuid234") + otherIngresses := []*v1beta1.Ingress{ + // ingresses with v1 naming scheme. + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKey), + newIngressWithFinalizer(longName[:20], longName[:27], common.FinalizerKey), + // ingresses with v2 naming scheme. + newIngressWithFinalizer(longName[:10], longName[:10], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:15], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:16], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName[:17], longName[:20], common.FinalizerKeyV2), + newIngressWithFinalizer(longName, longName, common.FinalizerKeyV2), + } + versions := features.GAResourceVersions + + for _, ing := range otherIngresses { + createFakeLoadbalancer(cloud, otherFeNamerFactory.Namer(ing), versions, defaultScope) + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + feNamer := feNamerFactory.Namer(tc.ing) + createFakeLoadbalancer(l7sPool.cloud, feNamer, versions, defaultScope) + err := l7sPool.GCv2(tc.ing) + if err != nil { + t.Errorf("l7sPool.GC(%q) = %v, want nil for case %q", common.NamespacedName(tc.ing), err, tc.desc) + } + + if err := checkFakeLoadBalancer(l7sPool.cloud, feNamer, versions, defaultScope, false); err != nil { + t.Errorf("checkFakeLoadBalancer(...) 
= %v, want nil for case %q", err, tc.desc) + } + urlMaps, _ := l7sPool.cloud.ListURLMaps() + if ingCount := len(otherIngresses); ingCount != len(urlMaps) { + t.Errorf("len(l7sPool.cloud.ListURLMaps()) = %d, want %d for case %q", len(urlMaps), ingCount, tc.desc) + } + removeFakeLoadBalancer(l7sPool.cloud, feNamer, versions, defaultScope) + }) + } +} + func newTestLoadBalancerPool() LoadBalancerPool { namer := namer_util.NewNamer(testClusterName, "fw1") fakeGCECloud := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) ctx := &context.ControllerContext{} - return NewLoadBalancerPool(fakeGCECloud, namer, ctx, namer_util.NewFrontendNamerFactory(namer)) + return NewLoadBalancerPool(fakeGCECloud, namer, ctx, namer_util.NewFrontendNamerFactory(namer, kubeSystemUID)) } func createFakeLoadbalancer(cloud *gce.Cloud, namer namer_util.IngressFrontendNamer, versions *features.ResourceVersions, scope meta.KeyType) { @@ -401,3 +721,13 @@ func generateKeyWithLength(length int) string { length = length - 1 return fmt.Sprintf("%s/%s", longName[:length/2], longName[:length-length/2]) } + +func newIngressWithFinalizer(namespace, name, finalizer string) *v1beta1.Ingress { + return &v1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Finalizers: []string{finalizer}, + }, + } +} diff --git a/pkg/loadbalancers/loadbalancers_test.go b/pkg/loadbalancers/loadbalancers_test.go index ff1c64036c..09025c9da1 100644 --- a/pkg/loadbalancers/loadbalancers_test.go +++ b/pkg/loadbalancers/loadbalancers_test.go @@ -56,7 +56,7 @@ const ( ) type testJig struct { - pool LoadBalancerPool + pool L7s fakeGCE *gce.Cloud mock *cloud.MockGCE namer *namer_util.Namer @@ -100,7 +100,7 @@ func newTestJig(t *testing.T) *testJig { mockGCE.MockGlobalForwardingRules.InsertHook = InsertGlobalForwardingRuleHook ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(namer, "").Namer(ing) return &testJig{ pool: newFakeLoadBalancerPool(fakeGCE, t, namer), @@ -127,12 +127,12 @@ func newIngress() *v1beta1.Ingress { } } -func newFakeLoadBalancerPool(cloud *gce.Cloud, t *testing.T, namer *namer_util.Namer) LoadBalancerPool { +func newFakeLoadBalancerPool(cloud *gce.Cloud, t *testing.T, namer *namer_util.Namer) L7s { fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), namer) nodePool := instances.NewNodePool(fakeIGs, namer) nodePool.Init(&instances.FakeZoneLister{Zones: []string{defaultZone}}) - return NewLoadBalancerPool(cloud, namer, events.RecorderProducerMock{}, namer_util.NewFrontendNamerFactory(namer)) + return L7s{cloud, namer, events.RecorderProducerMock{}, namer_util.NewFrontendNamerFactory(namer, "")} } func newILBIngress() *v1beta1.Ingress { @@ -326,7 +326,7 @@ func TestCertUpdate(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) certName2 := feNamer.SSLCertName(GetCertHash("cert2")) @@ -364,7 +364,7 @@ func TestMultipleSecretsWithSameCert(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", 
[]utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) lbInfo := &L7RuntimeInfo{ AllowHTTP: false, @@ -393,7 +393,7 @@ func TestCertCreationWithCollision(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) certName2 := feNamer.SSLCertName(GetCertHash("cert2")) @@ -454,7 +454,7 @@ func TestMultipleCertRetentionAfterRestart(t *testing.T) { cert2 := createCert("key2", "cert2", "name2") cert3 := createCert("key3", "cert3", "name3") ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(cert1.CertHash) certName2 := feNamer.SSLCertName(cert2.CertHash) certName3 := feNamer.SSLCertName(cert3.CertHash) @@ -502,7 +502,7 @@ func TestUpgradeToNewCertNames(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) lbInfo := &L7RuntimeInfo{ AllowHTTP: false, UrlMap: gceUrlMap, @@ -565,7 +565,7 @@ func TestMaxCertsUpload(t *testing.T) { expectCerts := make(map[string]string) expectCertsExtra := make(map[string]string) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) for ix := 0; ix < FakeCertQuota; ix++ { str := strconv.Itoa(ix) @@ -628,7 +628,7 @@ func TestIdenticalHostnameCerts(t *testing.T) { var tlsCerts []*TLSCerts expectCerts := make(map[string]string) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) contents := "" for ix := 0; ix < 3; ix++ { @@ -653,7 +653,7 @@ func TestIdenticalHostnameCerts(t *testing.T) { verifyCertAndProxyLink(expectCerts, expectCerts, j, t) // Fetch the target proxy certs and go through in order verifyProxyCertsInOrder(" foo.com", j, t) - j.pool.Delete(common.IngressKeyFunc(lbInfo.Ingress), features.GAResourceVersions, defaultScope) + j.pool.delete(common.IngressKeyFunc(lbInfo.Ingress), features.GAResourceVersions, defaultScope) } } @@ -708,7 +708,7 @@ func TestIdenticalHostnameCertsPreShared(t *testing.T) { verifyCertAndProxyLink(expectCerts, expectCerts, j, t) // Fetch the target proxy certs and go through in order verifyProxyCertsInOrder(" foo.com", j, t) - j.pool.Delete(common.IngressKeyFunc(lbInfo.Ingress), features.GAResourceVersions, defaultScope) + j.pool.delete(common.IngressKeyFunc(lbInfo.Ingress), features.GAResourceVersions, defaultScope) } } @@ -721,7 +721,7 @@ func TestPreSharedToSecretBasedCertUpdate(t *testing.T) { gceUrlMap.DefaultBackend = 
&utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) certName2 := feNamer.SSLCertName(GetCertHash("cert2")) @@ -1195,7 +1195,7 @@ func TestList(t *testing.T) { t.Fatalf("j.pool.Ensure() = %v; want nil", err) } - urlMaps, err := j.pool.List(key, defaultVersion) + urlMaps, err := j.pool.list(key, defaultVersion) if err != nil { t.Fatalf("j.pool.List(%q, %q) = %v, want nil", key, defaultVersion, err) } @@ -1222,7 +1222,7 @@ func TestSecretBasedAndPreSharedCerts(t *testing.T) { gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) namer := namer_util.NewNamer(clusterName, "fw1") ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) certName2 := feNamer.SSLCertName(GetCertHash("cert2")) @@ -1288,7 +1288,7 @@ func TestMaxSecretBasedAndPreSharedCerts(t *testing.T) { expectCerts := make(map[string]string) expectCertsExtra := make(map[string]string) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) for ix := 0; ix < TargetProxyCertLimit; ix++ { str := strconv.Itoa(ix) @@ -1379,7 +1379,7 @@ func TestSecretBasedToPreSharedCertUpdate(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) lbInfo := &L7RuntimeInfo{ @@ -1435,7 +1435,7 @@ func TestSecretBasedToPreSharedCertUpdateWithErrors(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) certName1 := feNamer.SSLCertName(GetCertHash("cert")) lbInfo := &L7RuntimeInfo{ @@ -1494,7 +1494,7 @@ func TestResourceDeletionWithProtocol(t *testing.T) { gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) ing := newIngress() - feNamer := namer_util.NewFrontendNamerFactory(j.namer).Namer(ing) + feNamer := namer_util.NewFrontendNamerFactory(j.namer, "").Namer(ing) versions := features.GAResourceVersions certName1 := feNamer.SSLCertName(GetCertHash("cert1")) diff --git a/pkg/loadbalancers/url_maps_test.go b/pkg/loadbalancers/url_maps_test.go index fe21ffae90..8843d14b4b 100644 --- 
a/pkg/loadbalancers/url_maps_test.go +++ b/pkg/loadbalancers/url_maps_test.go @@ -81,7 +81,7 @@ func TestToComputeURLMap(t *testing.T) { }, } - namerFactory := namer_util.NewFrontendNamerFactory(namer) + namerFactory := namer_util.NewFrontendNamerFactory(namer, "") feNamer := namerFactory.NamerForLbName("ns/lb-name") gotComputeURLMap := toCompositeURLMap(gceURLMap, feNamer, meta.GlobalKey("ns-lb-name")) if !mapsEqual(gotComputeURLMap, wantComputeMap) { diff --git a/pkg/neg/controller_test.go b/pkg/neg/controller_test.go index ca3294db96..f0dde5ec18 100644 --- a/pkg/neg/controller_test.go +++ b/pkg/neg/controller_test.go @@ -89,7 +89,7 @@ func newTestController(kubeClient kubernetes.Interface) *Controller { ASMConfigMapNamespace: "kube-system", ASMConfigMapName: "ingress-controller-config-test", } - context := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, gce.NewFakeGCECloud(gce.DefaultTestClusterValues()), namer, ctxConfig) + context := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, gce.NewFakeGCECloud(gce.DefaultTestClusterValues()), namer, "" /*kubeSystemUID*/, ctxConfig) // Hack the context.Init func. configMapInformer := informerv1.NewConfigMapInformer(kubeClient, context.Namespace, context.ResyncPeriod, utils.NewNamespaceIndexer()) diff --git a/pkg/neg/manager_test.go b/pkg/neg/manager_test.go index 33f62bd485..8373240166 100644 --- a/pkg/neg/manager_test.go +++ b/pkg/neg/manager_test.go @@ -74,7 +74,7 @@ func NewTestSyncerManager(kubeClient kubernetes.Interface) *syncerManager { ResyncPeriod: 1 * time.Second, DefaultBackendSvcPort: defaultBackend, } - context := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, gce.NewFakeGCECloud(gce.DefaultTestClusterValues()), namer, ctxConfig) + context := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, gce.NewFakeGCECloud(gce.DefaultTestClusterValues()), namer, "" /*kubeSystemUID*/, ctxConfig) manager := newSyncerManager( namer, diff --git a/pkg/neg/readiness/reflector_test.go b/pkg/neg/readiness/reflector_test.go index a913e8aa83..1963627871 100644 --- a/pkg/neg/readiness/reflector_test.go +++ b/pkg/neg/readiness/reflector_test.go @@ -62,7 +62,7 @@ func fakeContext() *context.ControllerContext { } fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) negtypes.MockNetworkEndpointAPIs(fakeGCE) - context := context.NewControllerContext(nil, kubeClient, nil, nil, fakeGCE, namer, ctxConfig) + context := context.NewControllerContext(nil, kubeClient, nil, nil, fakeGCE, namer, "" /*kubeSystemUID*/, ctxConfig) return context } diff --git a/pkg/neg/syncers/syncer_test.go b/pkg/neg/syncers/syncer_test.go index 5ce521fe3e..ed5c8c6d40 100644 --- a/pkg/neg/syncers/syncer_test.go +++ b/pkg/neg/syncers/syncer_test.go @@ -88,7 +88,7 @@ func newSyncerTester() *syncerTester { ResyncPeriod: 1 * time.Second, DefaultBackendSvcPort: defaultBackend, } - context := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, nil, namer, ctxConfig) + context := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, nil, namer, "" /*kubeSystemUID*/, ctxConfig) negSyncerKey := negtypes.NegSyncerKey{ Namespace: testServiceNamespace, Name: testServiceName, diff --git a/pkg/neg/syncers/transaction_test.go b/pkg/neg/syncers/transaction_test.go index c18e4f4098..7b18b93e8d 100644 --- a/pkg/neg/syncers/transaction_test.go +++ b/pkg/neg/syncers/transaction_test.go @@ -824,7 +824,7 @@ func newTestTransactionSyncer(fakeGCE 
negtypes.NetworkEndpointGroupCloud) (negty ResyncPeriod: 1 * time.Second, DefaultBackendSvcPort: defaultBackend, } - context := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, nil, namer, ctxConfig) + context := context.NewControllerContext(nil, kubeClient, backendConfigClient, nil, nil, namer, "" /*kubeSystemUID*/, ctxConfig) svcPort := negtypes.NegSyncerKey{ Namespace: testNamespace, Name: testService, diff --git a/pkg/sync/interfaces.go b/pkg/sync/interfaces.go index f91185746e..5bc4ee8c8c 100644 --- a/pkg/sync/interfaces.go +++ b/pkg/sync/interfaces.go @@ -18,6 +18,7 @@ package sync import ( "k8s.io/api/networking/v1beta1" + "k8s.io/ingress-gce/pkg/utils" ) // Syncer is an interface to sync GCP resources associated with an Ingress. @@ -26,9 +27,10 @@ type Syncer interface { Sync(state interface{}) error // GC cleans up GCLB resources for all Ingresses and can optionally // use some arbitrary to help with the process. + // GC workflow performs frontend resource deletion based on given gc algorithm. // TODO(rramkumar): Do we need to rethink the strategy of GC'ing // all Ingresses at once? - GC(ings []*v1beta1.Ingress) error + GC(ings []*v1beta1.Ingress, currIng *v1beta1.Ingress, frontendGCAlgorithm utils.FrontendGCAlgorithm) error } // Controller is an interface for ingress controllers and declares methods @@ -40,10 +42,16 @@ type Controller interface { GCBackends(toKeep []*v1beta1.Ingress) error // SyncLoadBalancer syncs the front-end load balancer resources for a GCLB given some existing state. SyncLoadBalancer(state interface{}) error - // GCLoadBalancers garbage collects front-end load balancer resources for all ingresses given a list of ingresses to exclude from GC. - GCLoadBalancers(toKeep []*v1beta1.Ingress) error + // GCv1LoadBalancers garbage collects front-end load balancer resources for all ingresses + // given a list of ingresses with v1 naming policy to exclude from GC. + GCv1LoadBalancers(toKeep []*v1beta1.Ingress) error + // GCv2LoadBalancer garbage collects front-end load balancer resources for given ingress + // with v2 naming policy. + GCv2LoadBalancer(ing *v1beta1.Ingress) error // PostProcess allows for doing some post-processing after an Ingress is synced to a GCLB. PostProcess(state interface{}) error - // MaybeRemoveFinalizers removes finalizers from a list of ingresses one by one after all of its associated resources are deleted. - MaybeRemoveFinalizers(toCleanup []*v1beta1.Ingress) error + // EnsureDeleteV1Finalizers ensures that v1 finalizers are removed for given list of ingresses. + EnsureDeleteV1Finalizers(toCleanup []*v1beta1.Ingress) error + // EnsureDeleteV2Finalizer ensures that v2 finalizer is removed for given ingress. + EnsureDeleteV2Finalizer(ing *v1beta1.Ingress) error } diff --git a/pkg/sync/sync.go b/pkg/sync/sync.go index 08db148558..d87d7f4221 100644 --- a/pkg/sync/sync.go +++ b/pkg/sync/sync.go @@ -23,6 +23,7 @@ import ( "k8s.io/api/networking/v1beta1" "k8s.io/ingress-gce/pkg/common/operator" "k8s.io/ingress-gce/pkg/utils" + "k8s.io/ingress-gce/pkg/utils/namer" ) // ErrSkipBackendsSync is an error that can be returned by a Controller to @@ -61,21 +62,56 @@ func (s *IngressSyncer) Sync(state interface{}) error { } // GC implements Syncer. -func (s *IngressSyncer) GC(ings []*v1beta1.Ingress) error { - // Partition GC state into ingresses those need cleanup and those don't. 
+func (s *IngressSyncer) GC(ings []*v1beta1.Ingress, currIng *v1beta1.Ingress, frontendGCAlgorithm utils.FrontendGCAlgorithm) error { + var lbErr, err error + var errs []error + switch frontendGCAlgorithm { + case utils.CleanupV2FrontendResources: + lbErr = s.controller.GCv2LoadBalancer(currIng) + + defer func() { + if err != nil { + return + } + err = s.controller.EnsureDeleteV2Finalizer(currIng) + }() + case utils.CleanupV1FrontendResources: + // Filter GCE ingresses that use v1 naming scheme. + v1Ingresses := operator.Ingresses(ings).Filter(func(ing *v1beta1.Ingress) bool { + return namer.FrontendNamingScheme(ing) == namer.V1NamingScheme + }) + // Partition these into ingresses that need cleanup and those that don't. + toCleanupV1, toKeepV1 := v1Ingresses.Partition(utils.NeedsCleanup) + // Note that only GCE ingress associated resources are managed by this controller. + toKeepV1Gce := toKeepV1.Filter(utils.IsGCEIngress) + lbErr = s.controller.GCv1LoadBalancers(toKeepV1Gce.AsList()) + + defer func() { + if err != nil { + return + } + err = s.controller.EnsureDeleteV1Finalizers(toCleanupV1.AsList()) + }() + case utils.NoCleanUpNeeded: + default: + lbErr = fmt.Errorf("unexpected frontend GC algorithm %v", frontendGCAlgorithm) + } + if lbErr != nil { + errs = append(errs, fmt.Errorf("error running load balancer garbage collection routine: %v", lbErr)) + } + // Filter ingresses that need to exist after GC. // An Ingress is considered to exist and not considered for cleanup, if: // 1) It is a GCLB Ingress. - // 2) It is not a candidate for deletion. - toCleanup, toKeep := operator.Ingresses(ings).Partition(utils.NeedsCleanup) - toKeepIngresses := toKeep.AsList() - lbErr := s.controller.GCLoadBalancers(toKeepIngresses) - beErr := s.controller.GCBackends(toKeepIngresses) - if lbErr != nil { - return fmt.Errorf("error running load balancer garbage collection routine: %v", lbErr) + // 2) It is not a deletion candidate. A deletion candidate is an ingress + // with deletion stamp and a finalizer. + toKeep := operator.Ingresses(ings).Filter(func(ing *v1beta1.Ingress) bool { + return !utils.NeedsCleanup(ing) + }).AsList() + if beErr := s.controller.GCBackends(toKeep); beErr != nil { + errs = append(errs, fmt.Errorf("error running backend garbage collection routine: %v", beErr)) } - if beErr != nil { - return fmt.Errorf("error running backend garbage collection routine: %v", beErr) + if errs != nil { + err = utils.JoinErrs(errs) } - - return s.controller.MaybeRemoveFinalizers(toCleanup.AsList()) + return err } diff --git a/pkg/test/utils.go b/pkg/test/utils.go index 4fcb4bdb16..3636a8ac66 100644 --- a/pkg/test/utils.go +++ b/pkg/test/utils.go @@ -12,6 +12,12 @@ import ( "k8s.io/ingress-gce/pkg/utils" ) +const ( + FinalizerAddFlag = flag("enable-finalizer-add") + FinalizerRemoveFlag = flag("enable-finalizer-remove") + EnableV2FrontendNamerFlag = flag("enable-v2-frontend-namer") +) + var ( BackendPort = intstr.IntOrString{Type: intstr.Int, IntVal: 80} DefaultBeSvcPort = utils.ServicePort{ @@ -80,3 +86,27 @@ func DecodeIngress(data []byte) (*v1beta1.Ingress, error) { return obj.(*v1beta1.Ingress), nil } + +// flag is a type representing a controller flag. +type flag string + +// FlagSaver is a utility type to capture the value of a flag and reset back to the saved value. +type FlagSaver struct{ flags map[flag]bool } + +// NewFlagSaver returns a flag saver by initializing the map. +func NewFlagSaver() FlagSaver { + return FlagSaver{make(map[flag]bool)} +} + +// Save captures the value of given flag.
+func (s *FlagSaver) Save(key flag, flagPointer *bool) { + s.flags[key] = *flagPointer +} + +// Reset resets the value of given flag to a previously saved value. +// This does nothing if the flag value was not captured. +func (s *FlagSaver) Reset(key flag, flagPointer *bool) { + if val, ok := s.flags[key]; ok { + *flagPointer = val + } +} diff --git a/pkg/utils/common/common.go b/pkg/utils/common/common.go index f908792268..0fb6663e3e 100644 --- a/pkg/utils/common/common.go +++ b/pkg/utils/common/common.go @@ -17,6 +17,9 @@ limitations under the License. package common import ( + "crypto/sha256" + "strconv" + "k8s.io/api/networking/v1beta1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" @@ -27,6 +30,32 @@ var ( KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc ) +// charMap is a list of string represented values of first 10 integers and lowercase +// alphabets. This is a lookup table to maintain entropy when converting bytes to string. +var charMap []string + +// WARNING: PLEASE DO NOT CHANGE THE HASH FUNCTION. +func init() { + for i := 0; i < 10; i++ { + charMap = append(charMap, strconv.Itoa(i)) + } + for i := 0; i < 26; i++ { + charMap = append(charMap, string('a'+rune(i))) + } +} + +// ContentHash creates a content hash string of length n of s utilizing sha256. +// WARNING: PLEASE DO NOT CHANGE THE HASH FUNCTION. +func ContentHash(s string, n int) string { + var h string + bytes := sha256.Sum256(([]byte)(s)) + for i := 0; i < n && i < len(bytes); i++ { + idx := int(bytes[i]) % len(charMap) + h += charMap[idx] + } + return h +} + // NamespacedName returns namespaced name string of a given ingress. // Note: This is used for logging. func NamespacedName(ing *v1beta1.Ingress) string { diff --git a/pkg/utils/common/finalizer.go b/pkg/utils/common/finalizer.go index d8fd7c4fd0..49f18b26ca 100644 --- a/pkg/utils/common/finalizer.go +++ b/pkg/utils/common/finalizer.go @@ -23,52 +23,63 @@ import ( "k8s.io/kubernetes/pkg/util/slice" ) -// FinalizerKey is the string representing the Ingress finalizer. -const FinalizerKey = "networking.gke.io/ingress-finalizer" +const ( + // FinalizerKey is the string representing the Ingress finalizer. + FinalizerKey = "networking.gke.io/ingress-finalizer" + // FinalizerKeyV2 is the string representing the Ingress finalizer version. + // Ingress with V2 finalizer uses V2 frontend naming scheme. + FinalizerKeyV2 = "networking.gke.io/ingress-finalizer-V2" +) + +// IsDeletionCandidate is true if the passed in meta contains an ingress finalizer. +func IsDeletionCandidate(m meta_v1.ObjectMeta) bool { + return IsDeletionCandidateForGivenFinalizer(m, FinalizerKey) || IsDeletionCandidateForGivenFinalizer(m, FinalizerKeyV2) +} -// IsDeletionCandidate is true if the passed in meta contains the specified finalizer. -func IsDeletionCandidate(m meta_v1.ObjectMeta, key string) bool { - return m.DeletionTimestamp != nil && HasFinalizer(m, key) +// IsDeletionCandidateForGivenFinalizer is true if the passed in meta contains the specified finalizer. +func IsDeletionCandidateForGivenFinalizer(m meta_v1.ObjectMeta, key string) bool { + return m.DeletionTimestamp != nil && HasGivenFinalizer(m, key) } -// NeedToAddFinalizer is true if the passed in meta does not contain the specified finalizer. -func NeedToAddFinalizer(m meta_v1.ObjectMeta, key string) bool { - return m.DeletionTimestamp == nil && !HasFinalizer(m, key) +// HasFinalizer is true if the passed in meta has an ingress finalizer. 
+func HasFinalizer(m meta_v1.ObjectMeta) bool { + return HasGivenFinalizer(m, FinalizerKey) || HasGivenFinalizer(m, FinalizerKeyV2) } -// HasFinalizer is true if the passed in meta has the specified finalizer. -func HasFinalizer(m meta_v1.ObjectMeta, key string) bool { +// HasGivenFinalizer is true if the passed in meta has the specified finalizer. +func HasGivenFinalizer(m meta_v1.ObjectMeta, key string) bool { return slice.ContainsString(m.Finalizers, key, nil) } -// AddFinalizer tries to add a finalizer to an Ingress. If a finalizer -// already exists, it does nothing. -func AddFinalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface) error { - ingKey := FinalizerKey - if NeedToAddFinalizer(ing.ObjectMeta, ingKey) { +// EnsureFinalizer ensures that the specified finalizer exists on given Ingress. +func EnsureFinalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface, finalizerKey string) error { + if needToAddFinalizer(ing.ObjectMeta, finalizerKey) { updated := ing.DeepCopy() - updated.ObjectMeta.Finalizers = append(updated.ObjectMeta.Finalizers, ingKey) + updated.ObjectMeta.Finalizers = append(updated.ObjectMeta.Finalizers, finalizerKey) + // TODO(smatti): Make this optimistic concurrency control compliant on write. + // Refer: https://github.com/eBay/Kubernetes/blob/master/docs/devel/api-conventions.md#concurrency-control-and-consistency if _, err := ingClient.Update(updated); err != nil { return fmt.Errorf("error updating Ingress %s/%s: %v", ing.Namespace, ing.Name, err) } - klog.V(3).Infof("Added finalizer %q for Ingress %s/%s", ingKey, ing.Namespace, ing.Name) + klog.V(2).Infof("Added finalizer %q for Ingress %s/%s", finalizerKey, ing.Namespace, ing.Name) } - return nil } -// RemoveFinalizer tries to remove a Finalizer from an Ingress. If a -// finalizer is not on the Ingress, it does nothing. -func RemoveFinalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface) error { - ingKey := FinalizerKey - if HasFinalizer(ing.ObjectMeta, ingKey) { +// needToAddFinalizer is true if the passed in meta does not contain the specified finalizer. +func needToAddFinalizer(m meta_v1.ObjectMeta, key string) bool { + return m.DeletionTimestamp == nil && !HasGivenFinalizer(m, key) +} + +// EnsureDeleteFinalizer ensures that the specified finalizer is deleted from given Ingress. +func EnsureDeleteFinalizer(ing *v1beta1.Ingress, ingClient client.IngressInterface, finalizerKey string) error { + if HasGivenFinalizer(ing.ObjectMeta, finalizerKey) { updated := ing.DeepCopy() - updated.ObjectMeta.Finalizers = slice.RemoveString(updated.ObjectMeta.Finalizers, ingKey, nil) + updated.ObjectMeta.Finalizers = slice.RemoveString(updated.ObjectMeta.Finalizers, finalizerKey, nil) if _, err := ingClient.Update(updated); err != nil { return fmt.Errorf("error updating Ingress %s/%s: %v", ing.Namespace, ing.Name, err) } - klog.V(3).Infof("Removed finalizer %q for Ingress %s/%s", ingKey, ing.Namespace, ing.Name) + klog.V(2).Infof("Removed finalizer %q for Ingress %s/%s", finalizerKey, ing.Namespace, ing.Name) } - return nil } diff --git a/pkg/utils/namer/frontendnamer.go b/pkg/utils/namer/frontendnamer.go index 653d0cfc2f..970b3e7331 100644 --- a/pkg/utils/namer/frontendnamer.go +++ b/pkg/utils/namer/frontendnamer.go @@ -14,13 +14,42 @@ limitations under the License. package namer import ( + "fmt" + "strings" + "k8s.io/api/networking/v1beta1" + "k8s.io/apimachinery/pkg/types" "k8s.io/ingress-gce/pkg/utils/common" + "k8s.io/klog" ) const ( + // V1NamingScheme is v1 frontend naming scheme. 
V1NamingScheme = Scheme("v1") + // V2NamingScheme is v2 frontend naming scheme. V2NamingScheme = Scheme("v2") + // schemaVersionV2 is suffix to be appended to resource prefix for v2 naming scheme. + schemaVersionV2 = "2" + // maximumAllowedCombinedLength is the maximum combined length of namespace and + // name portions in the resource name. + // This is computed by subtracting: k8s2 - 4, dashes - 5, resource prefix - 2, + // clusterUID - 8, suffix hash- 8. + // maximumAllowedCombinedLength = maximum name length of gce resource(63) - 27 + maximumAllowedCombinedLength = 36 + // urlMapPrefixV2 is URL map prefix for v2 naming scheme. + urlMapPrefixV2 = "um" + // forwardingRulePrefixV2 is http forwarding rule prefix for v2 naming scheme. + forwardingRulePrefixV2 = "fr" + // httpsForwardingRulePrefixV2 is https forwarding rule prefix for v2 naming scheme. + httpsForwardingRulePrefixV2 = "fs" + // targetHTTPProxyPrefixV2 is target http proxy prefix for v2 naming scheme. + targetHTTPProxyPrefixV2 = "tp" + // targetHTTPSProxyPrefixV2 is target https proxy prefix for v2 naming scheme. + targetHTTPSProxyPrefixV2 = "ts" + // sslCertPrefixV2 is ssl certificate prefix for v2 naming scheme. + sslCertPrefixV2 = "cr" + // clusterUIDLength is length of cluster UID to be included in resource names. + clusterUIDLength = 8 ) // Scheme is ingress frontend name scheme. @@ -79,25 +108,142 @@ func (ln *V1IngressFrontendNamer) LbName() string { return ln.lbName } +// V2IngressFrontendNamer implements IngressFrontendNamer. +type V2IngressFrontendNamer struct { + ing *v1beta1.Ingress + // prefix for all resource names (ex.: "k8s"). + prefix string + // Load balancer name to be included in resource name. + lbName string + // clusterUID is an 8 character hash to be included in resource names. + // This is immutable after the cluster is created. Kube-system uid which is + // immutable is used as cluster UID for v2 naming scheme. + clusterUID string +} + +// newV2IngressFrontendNamer returns a v2 frontend namer for given ingress, cluster uid and prefix. +// Example: +// For Ingress - namespace/ingress, clusterUID - uid0123, prefix - k8s +// The resource names are - +// LoadBalancer : uid0123-namespace-ingress-cysix1wq +// HTTP Forwarding Rule : k8s2-fr-uid0123-namespace-ingress-cysix1wq +// HTTPS Forwarding Rule : k8s2-fs-uid0123-namespace-ingress-cysix1wq +// Target HTTP Proxy : k8s2-tp-uid0123-namespace-ingress-cysix1wq +// Target HTTPS Proxy : k8s2-ts-uid0123-namespace-ingress-cysix1wq +// URL Map : k8s2-um-uid0123-namespace-ingress-cysix1wq +// SSL Certificate : k8s2-cr-uid0123-<lb-hash>-<secret-hash> +func newV2IngressFrontendNamer(ing *v1beta1.Ingress, clusterUID string, prefix string) IngressFrontendNamer { + namer := &V2IngressFrontendNamer{ing: ing, prefix: prefix, clusterUID: clusterUID} + // Initialize LbName. + truncFields := TrimFieldsEvenly(maximumAllowedCombinedLength, ing.Namespace, ing.Name) + truncNamespace := truncFields[0] + truncName := truncFields[1] + suffix := namer.suffix(clusterUID, ing.Namespace, ing.Name) + namer.lbName = fmt.Sprintf("%s-%s-%s-%s", clusterUID, truncNamespace, truncName, suffix) + return namer +} + +// ForwardingRule returns the name of forwarding rule based on given protocol.
+func (vn *V2IngressFrontendNamer) ForwardingRule(protocol NamerProtocol) string { + switch protocol { + case HTTPProtocol: + return fmt.Sprintf("%s%s-%s-%s", vn.prefix, schemaVersionV2, forwardingRulePrefixV2, vn.lbName) + case HTTPSProtocol: + return fmt.Sprintf("%s%s-%s-%s", vn.prefix, schemaVersionV2, httpsForwardingRulePrefixV2, vn.lbName) + default: + klog.Fatalf("invalid ForwardingRule protocol: %q", protocol) + return "invalid" + } +} + +// TargetProxy returns the name of target proxy based on given protocol. +func (vn *V2IngressFrontendNamer) TargetProxy(protocol NamerProtocol) string { + switch protocol { + case HTTPProtocol: + return fmt.Sprintf("%s%s-%s-%s", vn.prefix, schemaVersionV2, targetHTTPProxyPrefixV2, vn.lbName) + case HTTPSProtocol: + return fmt.Sprintf("%s%s-%s-%s", vn.prefix, schemaVersionV2, targetHTTPSProxyPrefixV2, vn.lbName) + default: + klog.Fatalf("invalid TargetProxy protocol: %q", protocol) + return "invalid" + } +} + +// UrlMap returns the name of URL map. +func (vn *V2IngressFrontendNamer) UrlMap() string { + return fmt.Sprintf("%s%s-%s-%s", vn.prefix, schemaVersionV2, urlMapPrefixV2, vn.lbName) +} + +// SSLCertName returns the name of the certificate. +func (vn *V2IngressFrontendNamer) SSLCertName(secretHash string) string { + return fmt.Sprintf("%s%s-%s-%s-%s-%s", vn.prefix, schemaVersionV2, sslCertPrefixV2, vn.clusterUID, vn.lbNameToHash(), secretHash) +} + +// IsCertNameForLB returns true if the certName belongs to this cluster's ingress. +// It checks that the hashed lbName exists. +func (vn *V2IngressFrontendNamer) IsCertNameForLB(certName string) bool { + prefix := fmt.Sprintf("%s%s-%s-%s-%s", vn.prefix, schemaVersionV2, sslCertPrefixV2, vn.clusterUID, vn.lbNameToHash()) + return strings.HasPrefix(certName, prefix) +} + +// IsLegacySSLCert always return false as v2 naming scheme does not use legacy certs. +func (vn *V2IngressFrontendNamer) IsLegacySSLCert(certName string) bool { + return false +} + +// LbName returns loadbalancer name. +// Note that this is used for generating GCE resource names. +func (vn *V2IngressFrontendNamer) LbName() string { + return vn.lbName +} + +// setLbName sets loadbalancer name. +func (vn *V2IngressFrontendNamer) setLbName() { + truncFields := TrimFieldsEvenly(maximumAllowedCombinedLength, vn.ing.Namespace, vn.ing.Name) + truncNamespace := truncFields[0] + truncName := truncFields[1] + suffix := vn.suffix(vn.clusterUID, vn.ing.Namespace, vn.ing.Name) + vn.lbName = fmt.Sprintf("%s-%s-%s-%s", vn.clusterUID, truncNamespace, truncName, suffix) +} + +// suffix returns hash string of length 8 of a concatenated string generated from +// uid (an 8 character hash of kube-system uid), namespace and name. +// These fields define an ingress/ load-balancer uniquely. +func (vn *V2IngressFrontendNamer) suffix(uid, namespace, name string) string { + lbString := strings.Join([]string{uid, namespace, name}, ";") + return common.ContentHash(lbString, 8) +} + +// lbNameToHash returns hash string of length 16 of lbName. +func (vn *V2IngressFrontendNamer) lbNameToHash() string { + return common.ContentHash(vn.lbName, 16) +} + // FrontendNamerFactory implements IngressFrontendNamerFactory. type FrontendNamerFactory struct { namer *Namer + // clusterUID is an 8 character hash of kube-system UID that is included + // in resource names. + // Note that this is used only for V2IngressFrontendNamer. + clusterUID string } -// NewFrontendNamerFactory returns IngressFrontendNamerFactory given a v1 namer. 
-func NewFrontendNamerFactory(namer *Namer) IngressFrontendNamerFactory { - return &FrontendNamerFactory{namer: namer} +// NewFrontendNamerFactory returns IngressFrontendNamerFactory given a v1 namer and uid. +func NewFrontendNamerFactory(namer *Namer, uid types.UID) IngressFrontendNamerFactory { + clusterUID := common.ContentHash(string(uid), clusterUIDLength) + return &FrontendNamerFactory{namer: namer, clusterUID: clusterUID} } // Namer implements IngressFrontendNamerFactory. func (rn *FrontendNamerFactory) Namer(ing *v1beta1.Ingress) IngressFrontendNamer { - switch frontendNamingScheme(ing) { + namingScheme := FrontendNamingScheme(ing) + switch namingScheme { case V1NamingScheme: return newV1IngressFrontendNamer(ing, rn.namer) case V2NamingScheme: - // TODO(smatti): return V2 Namer. - return nil + return newV2IngressFrontendNamer(ing, rn.clusterUID, rn.namer.prefix) default: + klog.Errorf("Unexpected frontend naming scheme %s", namingScheme) return newV1IngressFrontendNamer(ing, rn.namer) } } diff --git a/pkg/utils/namer/frontendnamer_test.go b/pkg/utils/namer/frontendnamer_test.go index b72a1de868..641d83bb60 100644 --- a/pkg/utils/namer/frontendnamer_test.go +++ b/pkg/utils/namer/frontendnamer_test.go @@ -26,7 +26,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const clusterUID = "uid1" +const ( + clusterUID = "uid1" + kubeSystemUID = "ksuid123" +) func newIngress(namespace, name string) *v1beta1.Ingress { return &v1beta1.Ingress{ @@ -144,7 +147,7 @@ func TestV1IngressFrontendNamer(t *testing.T) { "%s-um-01234567890123456789012345678901234567890123456789-01230", }, } - for _, prefix := range []string{"k8s", "mci"} { + for _, prefix := range []string{"k8s", "xyz"} { oldNamer := NewNamerWithPrefix(prefix, clusterUID, "") secretHash := fmt.Sprintf("%x", sha256.Sum256([]byte("test123")))[:16] for _, tc := range testCases { @@ -237,3 +240,152 @@ func TestV1IngressFrontendNamer(t *testing.T) { } } } + +// TestV2IngressFrontendNamer tests v2 frontend namer workflow. +func TestV2IngressFrontendNamer(t *testing.T) { + longString := "01234567890123456789012345678901234567890123456789" + testCases := []struct { + desc string + namespace string + name string + // Expected values. 
+ lbName string + targetHTTPProxy string + targetHTTPSProxy string + sslCert string + forwardingRuleHTTP string + forwardingRuleHTTPS string + urlMap string + }{ + { + "simple case", + "namespace", + "name", + "ksuid123-namespace-name-wddys49o", + "%s2-tp-ksuid123-namespace-name-wddys49o", + "%s2-ts-ksuid123-namespace-name-wddys49o", + "%s2-cr-ksuid123-lb75yy4dn9xa8ib0-%s", + "%s2-fr-ksuid123-namespace-name-wddys49o", + "%s2-fs-ksuid123-namespace-name-wddys49o", + "%s2-um-ksuid123-namespace-name-wddys49o", + }, + { + "62 characters", + longString[:23], + longString[:24], + "ksuid123-012345678901234567-012345678901234567-sce8socf", + "%s2-tp-ksuid123-012345678901234567-012345678901234567-sce8socf", + "%s2-ts-ksuid123-012345678901234567-012345678901234567-sce8socf", + "%s2-cr-ksuid123-dlz1pf382qzmdac9-%s", + "%s2-fr-ksuid123-012345678901234567-012345678901234567-sce8socf", + "%s2-fs-ksuid123-012345678901234567-012345678901234567-sce8socf", + "%s2-um-ksuid123-012345678901234567-012345678901234567-sce8socf", + }, + { + "63 characters", + longString[:24], + longString[:24], + "ksuid123-012345678901234567-012345678901234567-tabdkrlv", + "%s2-tp-ksuid123-012345678901234567-012345678901234567-tabdkrlv", + "%s2-ts-ksuid123-012345678901234567-012345678901234567-tabdkrlv", + "%s2-cr-ksuid123-8ohtur3hfgw0qe3f-%s", + "%s2-fr-ksuid123-012345678901234567-012345678901234567-tabdkrlv", + "%s2-fs-ksuid123-012345678901234567-012345678901234567-tabdkrlv", + "%s2-um-ksuid123-012345678901234567-012345678901234567-tabdkrlv", + }, + { + "64 characters", + longString[:24], + longString[:25], + "ksuid123-012345678901234567-012345678901234567-dhfwuws7", + "%s2-tp-ksuid123-012345678901234567-012345678901234567-dhfwuws7", + "%s2-ts-ksuid123-012345678901234567-012345678901234567-dhfwuws7", + "%s2-cr-ksuid123-1vm2cunkgqy1exss-%s", + "%s2-fr-ksuid123-012345678901234567-012345678901234567-dhfwuws7", + "%s2-fs-ksuid123-012345678901234567-012345678901234567-dhfwuws7", + "%s2-um-ksuid123-012345678901234567-012345678901234567-dhfwuws7", + }, + { + "long namespace", + longString, + "0", + "ksuid123-012345678901234567890123456789012345--4mg0wxbi", + "%s2-tp-ksuid123-012345678901234567890123456789012345--4mg0wxbi", + "%s2-ts-ksuid123-012345678901234567890123456789012345--4mg0wxbi", + "%s2-cr-ksuid123-wfasfag894xpvmnr-%s", + "%s2-fr-ksuid123-012345678901234567890123456789012345--4mg0wxbi", + "%s2-fs-ksuid123-012345678901234567890123456789012345--4mg0wxbi", + "%s2-um-ksuid123-012345678901234567890123456789012345--4mg0wxbi", + }, + { + "long name", + "0", + longString, + "ksuid123-0-01234567890123456789012345678901234-mebui9t8", + "%s2-tp-ksuid123-0-01234567890123456789012345678901234-mebui9t8", + "%s2-ts-ksuid123-0-01234567890123456789012345678901234-mebui9t8", + "%s2-cr-ksuid123-50ba1vszcf9aeh73-%s", + "%s2-fr-ksuid123-0-01234567890123456789012345678901234-mebui9t8", + "%s2-fs-ksuid123-0-01234567890123456789012345678901234-mebui9t8", + "%s2-um-ksuid123-0-01234567890123456789012345678901234-mebui9t8", + }, + { + "long name and namespace", + longString, + longString, + "ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + "%s2-tp-ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + "%s2-ts-ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + "%s2-cr-ksuid123-7trbunxyryt39g1b-%s", + "%s2-fr-ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + "%s2-fs-ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + "%s2-um-ksuid123-012345678901234567-012345678901234567-4mwbp6m5", + }, + } + for _, prefix := 
range []string{"k8s", "xyz"} { + oldNamer := NewNamerWithPrefix(prefix, clusterUID, "") + secretHash := fmt.Sprintf("%x", sha256.Sum256([]byte("test123")))[:16] + for _, tc := range testCases { + tc.desc = fmt.Sprintf("%s namespaceLength %d nameLength %d prefix %s", tc.desc, len(tc.namespace), len(tc.name), prefix) + t.Run(tc.desc, func(t *testing.T) { + ing := newIngress(tc.namespace, tc.name) + namer := newV2IngressFrontendNamer(ing, kubeSystemUID, oldNamer.prefix) + tc.targetHTTPProxy = fmt.Sprintf(tc.targetHTTPProxy, prefix) + tc.targetHTTPSProxy = fmt.Sprintf(tc.targetHTTPSProxy, prefix) + tc.sslCert = fmt.Sprintf(tc.sslCert, prefix, secretHash) + tc.forwardingRuleHTTP = fmt.Sprintf(tc.forwardingRuleHTTP, prefix) + tc.forwardingRuleHTTPS = fmt.Sprintf(tc.forwardingRuleHTTPS, prefix) + tc.urlMap = fmt.Sprintf(tc.urlMap, prefix) + + // Test behavior of v2 Namer. + if diff := cmp.Diff(tc.lbName, namer.LbName()); diff != "" { + t.Errorf("namer.GetLbName() mismatch (-want +got):\n%s", diff) + } + name := namer.TargetProxy(HTTPProtocol) + if diff := cmp.Diff(tc.targetHTTPProxy, name); diff != "" { + t.Errorf("namer.TargetProxy(HTTPProtocol) mismatch (-want +got):\n%s", diff) + } + name = namer.TargetProxy(HTTPSProtocol) + if diff := cmp.Diff(tc.targetHTTPSProxy, name); diff != "" { + t.Errorf("namer.TargetProxy(HTTPSProtocol) mismatch (-want +got):\n%s", diff) + } + name = namer.SSLCertName(secretHash) + if diff := cmp.Diff(tc.sslCert, name); diff != "" { + t.Errorf("namer.SSLCertName(%q) mismatch (-want +got):\n%s", secretHash, diff) + } + name = namer.ForwardingRule(HTTPProtocol) + if diff := cmp.Diff(tc.forwardingRuleHTTP, name); diff != "" { + t.Errorf("namer.ForwardingRule(HTTPProtocol) mismatch (-want +got):\n%s", diff) + } + name = namer.ForwardingRule(HTTPSProtocol) + if diff := cmp.Diff(tc.forwardingRuleHTTPS, name); diff != "" { + t.Errorf("namer.ForwardingRule(HTTPSProtocol) mismatch (-want +got):\n%s", diff) + } + name = namer.UrlMap() + if diff := cmp.Diff(tc.urlMap, name); diff != "" { + t.Errorf("namer.UrlMap() mismatch (-want +got):\n%s", diff) + } + }) + } + } +} diff --git a/pkg/utils/namer/utils.go b/pkg/utils/namer/utils.go index 3d286b0518..7947183437 100644 --- a/pkg/utils/namer/utils.go +++ b/pkg/utils/namer/utils.go @@ -14,6 +14,8 @@ limitations under the License. package namer import ( + "fmt" + "k8s.io/api/networking/v1beta1" "k8s.io/ingress-gce/pkg/utils/common" "k8s.io/klog" @@ -56,14 +58,27 @@ func TrimFieldsEvenly(max int, fields ...string) []string { return ret } -// frontendNamingScheme returns naming scheme for given ingress. -func frontendNamingScheme(ing *v1beta1.Ingress) Scheme { - // TODO(smatti): return V2NamingScheme if ingress has V2 finalizer +// FrontendNamingScheme returns naming scheme for given ingress based on its finalizer. +func FrontendNamingScheme(ing *v1beta1.Ingress) Scheme { switch { - case common.HasFinalizer(ing.ObjectMeta, common.FinalizerKey): + case common.HasGivenFinalizer(ing.ObjectMeta, common.FinalizerKeyV2): + return V2NamingScheme + case common.HasGivenFinalizer(ing.ObjectMeta, common.FinalizerKey): return V1NamingScheme default: - klog.V(3).Infof("No finalizer found on Ingress %v, using Legacy naming scheme", ing) + klog.V(3).Infof("No finalizer found on Ingress %v, using v1 naming scheme", ing) return V1NamingScheme } } + +// FinalizerForNamingScheme returns finalizer corresponding to given frontend naming scheme. 
+func FinalizerForNamingScheme(scheme Scheme) (string, error) { + switch scheme { + case V1NamingScheme: + return common.FinalizerKey, nil + case V2NamingScheme: + return common.FinalizerKeyV2, nil + default: + return "", fmt.Errorf("unexpected naming scheme: %s", scheme) + } +} diff --git a/pkg/utils/namer/utils_test.go b/pkg/utils/namer/utils_test.go index 088b7cc293..1a97db11a8 100644 --- a/pkg/utils/namer/utils_test.go +++ b/pkg/utils/namer/utils_test.go @@ -13,7 +13,13 @@ limitations under the License. package namer -import "testing" +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "k8s.io/ingress-gce/pkg/utils/common" +) func TestTrimFieldsEvenly(t *testing.T) { t.Parallel() @@ -87,3 +93,28 @@ func TestTrimFieldsEvenly(t *testing.T) { } } } + +// TestFrontendNamingScheme asserts that correct naming scheme is returned for given ingress. +func TestFrontendNamingScheme(t *testing.T) { + testCases := []struct { + finalizer string + expectScheme Scheme + }{ + {"", V1NamingScheme}, + {common.FinalizerKey, V1NamingScheme}, + {common.FinalizerKeyV2, V2NamingScheme}, + } + for _, tc := range testCases { + desc := fmt.Sprintf("Finalizer %q", tc.finalizer) + t.Run(desc, func(t *testing.T) { + ing := newIngress("namespace", "name") + if tc.finalizer != "" { + ing.ObjectMeta.Finalizers = []string{tc.finalizer} + } + + if diff := cmp.Diff(tc.expectScheme, FrontendNamingScheme(ing)); diff != "" { + t.Fatalf("Got diff for Frontend naming scheme (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 460bce0a01..3e73a064e1 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -72,6 +72,20 @@ const ( ToBeDeletedTaint = "ToBeDeletedByClusterAutoscaler" ) +// FrontendGCAlgorithm species GC algorithm used for ingress frontend resources. +type FrontendGCAlgorithm int + +const ( + // NoCleanUpNeeded specifies that frontend resources need not be deleted. + NoCleanUpNeeded FrontendGCAlgorithm = iota + // CleanupV1FrontendResources specifies that frontend resources for ingresses + // that use v1 naming scheme need to be deleted. + CleanupV1FrontendResources + // CleanupV2FrontendResources specifies that frontend resources for ingresses + // that use v2 naming scheme need to be deleted. + CleanupV2FrontendResources +) + // FakeGoogleAPIForbiddenErr creates a Forbidden error with type googleapi.Error func FakeGoogleAPIForbiddenErr() *googleapi.Error { return &googleapi.Error{Code: http.StatusForbidden} @@ -383,5 +397,16 @@ func ServiceKeyFunc(namespace, name string) string { // NeedsCleanup returns true if the ingress needs to have its associated resources deleted. func NeedsCleanup(ing *v1beta1.Ingress) bool { - return common.IsDeletionCandidate(ing.ObjectMeta, common.FinalizerKey) || !IsGLBCIngress(ing) + return common.IsDeletionCandidate(ing.ObjectMeta) || !IsGLBCIngress(ing) +} + +// HasVIP returns true if given ingress has a vip. 
+func HasVIP(ing *v1beta1.Ingress) bool { + if ing == nil { + return false + } + if lbIPs := ing.Status.LoadBalancer.Ingress; len(lbIPs) == 0 || lbIPs[0].IP == "" { + return false + } + return true } diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go index 56e0b3cccc..a979ac5f32 100644 --- a/pkg/utils/utils_test.go +++ b/pkg/utils/utils_test.go @@ -649,3 +649,73 @@ func TestNeedsCleanup(t *testing.T) { }) } } + +func TestHasVIP(t *testing.T) { + for _, tc := range []struct { + desc string + ing *v1beta1.Ingress + expectHasVIP bool + }{ + {"nil", nil, false}, + {"empty ingress status", &v1beta1.Ingress{ + Status: v1beta1.IngressStatus{}, + }, + false, + }, + {"empty load-balancer status", &v1beta1.Ingress{ + Status: v1beta1.IngressStatus{ + LoadBalancer: api_v1.LoadBalancerStatus{}, + }, + }, + false, + }, + {"empty load-balancer ingress", &v1beta1.Ingress{ + Status: v1beta1.IngressStatus{ + LoadBalancer: api_v1.LoadBalancerStatus{ + Ingress: []api_v1.LoadBalancerIngress{}, + }, + }, + }, + false, + }, + {"empty IP", &v1beta1.Ingress{ + Status: v1beta1.IngressStatus{ + LoadBalancer: api_v1.LoadBalancerStatus{ + Ingress: []api_v1.LoadBalancerIngress{ + {IP: ""}, + }, + }, + }, + }, + false, + }, + {"valid IP", &v1beta1.Ingress{ + Status: v1beta1.IngressStatus{ + LoadBalancer: api_v1.LoadBalancerStatus{ + Ingress: []api_v1.LoadBalancerIngress{ + {IP: "0.0.0.0"}, + }, + }, + }, + }, + true, + }, + {"random", &v1beta1.Ingress{ + Status: v1beta1.IngressStatus{ + LoadBalancer: api_v1.LoadBalancerStatus{ + Ingress: []api_v1.LoadBalancerIngress{ + {IP: "xxxxxx"}, + }, + }, + }, + }, + true, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + if gotHasVIP := HasVIP(tc.ing); tc.expectHasVIP != gotHasVIP { + t.Errorf("Got diff HasVIP, expected %t got %t", tc.expectHasVIP, gotHasVIP) + } + }) + } +}