diff --git a/azure-pipelines.yaml b/azure-pipelines.yaml index 2d87db1ca..0982c5e2f 100644 --- a/azure-pipelines.yaml +++ b/azure-pipelines.yaml @@ -32,6 +32,7 @@ variables: operatorImageName: 'f5networks/f5-cis-operator-devel' operatorBundleImageName: 'f5networks/f5-cis-operator-bundle-devel' operatorIndexImage: 'f5networks/f5-cis-operator-index-devel' + goVersion: 1.22.7 stages: - stage: PreCheck diff --git a/build-tools/Dockerfile.debian b/build-tools/Dockerfile.debian index b4ed61da2..5d87718cc 100755 --- a/build-tools/Dockerfile.debian +++ b/build-tools/Dockerfile.debian @@ -1,4 +1,4 @@ -FROM golang:1.21 as builder +FROM golang:1.22 as builder ARG REPOPATH=$GOPATH/src/github.com/F5Networks/k8s-bigip-ctlr ARG RUN_TESTS diff --git a/build-tools/Dockerfile.debug b/build-tools/Dockerfile.debug index e2e687600..ec1133f00 100644 --- a/build-tools/Dockerfile.debug +++ b/build-tools/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.21 as builder +FROM golang:1.22 as builder ARG REPOPATH=$GOPATH/src/github.com/F5Networks/k8s-bigip-ctlr ARG RUN_TESTS diff --git a/build-tools/Dockerfile.ubi b/build-tools/Dockerfile.ubi index fd65c95f7..3e6ddbed4 100644 --- a/build-tools/Dockerfile.ubi +++ b/build-tools/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.21 as builder +FROM golang:1.22 as builder ARG REPOPATH=$GOPATH/src/github.com/F5Networks/k8s-bigip-ctlr ARG RUN_TESTS diff --git a/build-tools/rel-build.sh b/build-tools/rel-build.sh index d2a03abcc..dba383ee4 100755 --- a/build-tools/rel-build.sh +++ b/build-tools/rel-build.sh @@ -16,9 +16,8 @@ RUN_TESTS=${RUN_TESTS:-1} if [ $RUN_TESTS -eq 1 ]; then go install github.com/onsi/ginkgo/v2/ginkgo go install github.com/onsi/gomega - GO111MODULE=off - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls + go install github.com/wadey/gocovmerge@latest + go install github.com/mattn/goveralls@latest echo "Gathering unit test code coverage for 'release' build..." ginkgo_test_with_coverage # push coverage data to coveralls if F5 repo or if configured for fork. diff --git a/config/apis/cis/v1/types.go b/config/apis/cis/v1/types.go index 9d27b8a37..ebd763dab 100644 --- a/config/apis/cis/v1/types.go +++ b/config/apis/cis/v1/types.go @@ -21,8 +21,10 @@ type VirtualServer struct { // VirtualServerStatus is the status of the VirtualServer resource. type VirtualServerStatus struct { - VSAddress string `json:"vsAddress,omitempty"` - StatusOk string `json:"status,omitempty"` + VSAddress string `json:"vsAddress,omitempty"` + Status string `json:"status,omitempty"` + LastUpdated metav1.Time `json:"lastUpdated,omitempty"` + Error string `json:"error,omitempty"` } // VirtualServerSpec is the spec of the VirtualServer resource. @@ -257,7 +259,10 @@ type IngressLink struct { // IngressLinkStatus is the status of the ingressLink resource. type IngressLinkStatus struct { - VSAddress string `json:"vsAddress,omitempty"` + VSAddress string `json:"vsAddress,omitempty"` + LastUpdated metav1.Time `json:"lastUpdated,omitempty"` + Error string `json:"error,omitempty"` + Status string `json:"status,omitempty"` } // IngressLinkSpec is Spec for IngressLink @@ -297,8 +302,10 @@ type TransportServer struct { // TransportServerStatus is the status of the VirtualServer resource. 
type TransportServerStatus struct { - VSAddress string `json:"vsAddress,omitempty"` - StatusOk string `json:"status,omitempty"` + VSAddress string `json:"vsAddress,omitempty"` + Status string `json:"status,omitempty"` + LastUpdated metav1.Time `json:"lastUpdated,omitempty"` + Error string `json:"error,omitempty"` } // TransportServerSpec is the spec of the VirtualServer resource. diff --git a/docs/RELEASE-NOTES.rst b/docs/RELEASE-NOTES.rst index 81c5003d3..890300a26 100644 --- a/docs/RELEASE-NOTES.rst +++ b/docs/RELEASE-NOTES.rst @@ -8,6 +8,9 @@ Added Functionality ``````````````````` **What's new:** * CRD + * `Issue 3536 `_: Support CRD status for VS, TS and IngressLink + * Support for custom partition and Pool settings for ServiceTypeLB service. `Examples `_ + Bug Fixes ```````````` @@ -18,6 +21,8 @@ Bug Fixes * `Issue 3501 `_: CIS with oneconnect and TLS breaks some connections * Remove pool members of GTM when host removed or updated on transport server, ingresslink, and, service type lb * `Issue 3535 `_: CIS with namespace-label is not working correctly in multicluster mode +* `Issue 3508 `_: Fix to disable default uid in F5 BIG-IP Controller Operator +* Fix for handling resource deletion in case of multiple VS/TS sharing the service Upgrade notes `````````````` @@ -50,8 +55,6 @@ Bug Fixes * `Issue 3396 `_: Fix adding pool members from external clusters in nodeportLocal mc mode * `Issue 3351 `_: improve message handling when getting HTTP/401 from AS3 * Fix pool members not getting updated for VS/TS on re-deployment of application with different servicePort and targetPort. -* `Issue 3508 `_: Fix to disable default uid in F5 BIG-IP Controller Operator - Upgrade notes `````````````` diff --git a/docs/config_examples/customResource/serviceTypeLB/README.md b/docs/config_examples/customResource/serviceTypeLB/README.md index f78986b40..198c8e1c7 100644 --- a/docs/config_examples/customResource/serviceTypeLB/README.md +++ b/docs/config_examples/customResource/serviceTypeLB/README.md @@ -42,6 +42,7 @@ Annotation supported for service type LoadBalancer: | cis.f5.com/policyName | Optional | Name of Policy CR to attach profiles/policies defined in it. | service-type-lb-with-policyname.yaml | | cis.f5.com/ip | Mandatory | Specify the ip address for the ltm virtual server. | example-service-type-lb-staic-ip.yaml | | cis.f5.com/host | Optional | Specify the hostname for configuring the WideIP pools on the GTM server, It works along with the EDNS CR. | service-type-lb-with-hostname.yaml | +| cis.f5.com/partition | Optional | The BIG-IP partition in which the Controller should create/update/delete objects for this ServiceTypeLB. 
| service-type-lb-with-custom-partition.yaml | Note:- diff --git a/docs/config_examples/customResource/serviceTypeLB/ServiceTypeLBWithPoolSettings/policy-with-pool-settings.yaml b/docs/config_examples/customResource/serviceTypeLB/ServiceTypeLBWithPoolSettings/policy-with-pool-settings.yaml new file mode 100644 index 000000000..3b36437dc --- /dev/null +++ b/docs/config_examples/customResource/serviceTypeLB/ServiceTypeLBWithPoolSettings/policy-with-pool-settings.yaml @@ -0,0 +1,18 @@ +apiVersion: cis.f5.com/v1 +kind: Policy +metadata: + labels: + f5cr: "true" + name: test-policy + namespace: default +spec: + poolSettings: + # reselectTries specifies the maximum number of attempts to find a responsive member for a connection + # Supported values: [0, 65535] + reselectTries: 1 + # serviceDownAction specifies connection handling when member is non-responsive + # Supported values: “drop”, “none”, “reselect”, “reset” + serviceDownAction: reselect + # BIG-IP AS3 sets the connection rate to a newly-active member slowly during this interval (seconds) + # Supported values: [0, 900] + slowRampTime: 20 \ No newline at end of file diff --git a/docs/config_examples/customResource/serviceTypeLB/ServiceTypeLBWithPoolSettings/service-type-lb-with-poolsettings-in-policycr.yaml b/docs/config_examples/customResource/serviceTypeLB/ServiceTypeLBWithPoolSettings/service-type-lb-with-poolsettings-in-policycr.yaml new file mode 100644 index 000000000..c8ab3c7f2 --- /dev/null +++ b/docs/config_examples/customResource/serviceTypeLB/ServiceTypeLBWithPoolSettings/service-type-lb-with-poolsettings-in-policycr.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + cis.f5.com/ip: 10.1.1.1 + cis.f5.com/policyName: test-policy + labels: + app: svc-lb1 + name: svc-lb1 + namespace: default +spec: + ports: + - name: svc-lb1-80 + port: 80 + protocol: TCP + targetPort: 80 + selector: + app: svc-lb1 + type: LoadBalancer \ No newline at end of file diff --git a/docs/config_examples/customResource/serviceTypeLB/service-type-lb-with-custom-partition.yaml b/docs/config_examples/customResource/serviceTypeLB/service-type-lb-with-custom-partition.yaml new file mode 100644 index 000000000..0e908d84c --- /dev/null +++ b/docs/config_examples/customResource/serviceTypeLB/service-type-lb-with-custom-partition.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + cis.f5.com/ip: 10.10.1.66 + cis.f5.com/partition: newPartition + labels: + app: svc-lb1 + name: svc-lb1 + namespace: default +spec: + ports: + - name: svc-lb1-80 + port: 80 + protocol: TCP + targetPort: 80 + selector: + app: svc-lb1 + type: LoadBalancer diff --git a/docs/config_examples/customResourceDefinitions/incubator/customresourcedefinitions.yml b/docs/config_examples/customResourceDefinitions/incubator/customresourcedefinitions.yml index ec7ba1b26..08018ac91 100644 --- a/docs/config_examples/customResourceDefinitions/incubator/customresourcedefinitions.yml +++ b/docs/config_examples/customResourceDefinitions/incubator/customresourcedefinitions.yml @@ -425,6 +425,10 @@ spec: status: type: string default: Pending + lastUpdated: + type: string + error: + type: string additionalPrinterColumns: - name: host type: string @@ -806,6 +810,10 @@ spec: status: type: string default: Pending + lastUpdated: + type: string + error: + type: string additionalPrinterColumns: - name: virtualServerAddress type: string @@ -1027,6 +1035,13 @@ spec: properties: vsAddress: type: string + status: + type: string + default: pending + lastUpdated: + 
type: string + error: + type: string additionalPrinterColumns: - name: IPAMVSAddress type: string diff --git a/go.mod b/go.mod index ad1256724..04a807ab3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/F5Networks/k8s-bigip-ctlr/v2 -go 1.21.12 +go 1.22.7 require ( github.com/F5Networks/f5-ipam-controller v0.1.8 diff --git a/pkg/controller/backend.go b/pkg/controller/backend.go index 58935f9ef..ec9c92b47 100644 --- a/pkg/controller/backend.go +++ b/pkg/controller/backend.go @@ -258,6 +258,8 @@ func (agent *Agent) agentWorker() { if len(agent.incomingTenantDeclMap) == 0 { log.Infof("%v[AS3] No tenants found in request", getRequestPrefix(rsConfig.reqId)) + // notify resourceStatusUpdate response handler for resourcestatus update + agent.notifyRscStatusHandler(rsConfig.reqId, false) agent.declUpdate.Unlock() continue } @@ -357,10 +359,10 @@ func (agent *Agent) notifyRscStatusHandler(id int, overwriteCfg bool) { rscUpdateMeta := resourceStatusMeta{ id, - make(map[string]struct{}), + make(map[string]tenantResponse), } for tenant := range agent.retryTenantDeclMap { - rscUpdateMeta.failedTenants[tenant] = struct{}{} + rscUpdateMeta.failedTenants[tenant] = agent.retryTenantDeclMap[tenant].tenantResponse } // If triggerred from retry block, process the previous successful request completely if !overwriteCfg { diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index f9e52a750..be307b7b0 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -116,6 +116,7 @@ const ( LBServiceIPAMLabelAnnotation = "cis.f5.com/ipamLabel" LBServiceIPAnnotation = "cis.f5.com/ip" LBServiceHostAnnotation = "cis.f5.com/host" + LBServicePartitionAnnotation = "cis.f5.com/partition" HealthMonitorAnnotation = "cis.f5.com/health" LBServicePolicyNameAnnotation = "cis.f5.com/policyName" LegacyHealthMonitorAnnotation = "virtual-server.f5.com/health" diff --git a/pkg/controller/informers.go b/pkg/controller/informers.go index b69730b4f..ea3b75da3 100644 --- a/pkg/controller/informers.go +++ b/pkg/controller/informers.go @@ -1127,11 +1127,21 @@ func (ctlr *Controller) enqueueUpdatedService(obj, cur interface{}, clusterName } } + // Check partition update for LoadBalancer service + partitionUpdate := false + if svc.Spec.Type == corev1.ServiceTypeLoadBalancer { + oldPartition, _ := svc.Annotations[LBServicePartitionAnnotation] + newPartition, _ := curSvc.Annotations[LBServicePartitionAnnotation] + if oldPartition != newPartition { + partitionUpdate = true + } + } + if (svc.Spec.Type != curSvc.Spec.Type && svc.Spec.Type == corev1.ServiceTypeLoadBalancer) || (svc.Spec.Type == corev1.ServiceTypeLoadBalancer && (svc.Annotations[LBServiceIPAnnotation] != curSvc.Annotations[LBServiceIPAnnotation] || svc.Annotations[LBServiceHostAnnotation] != curSvc.Annotations[LBServiceHostAnnotation])) || (svc.Annotations[LBServiceIPAMLabelAnnotation] != curSvc.Annotations[LBServiceIPAMLabelAnnotation]) || !reflect.DeepEqual(svc.Labels, curSvc.Labels) || !reflect.DeepEqual(svc.Spec.Ports, curSvc.Spec.Ports) || - !reflect.DeepEqual(svc.Spec.Selector, curSvc.Spec.Selector) { + !reflect.DeepEqual(svc.Spec.Selector, curSvc.Spec.Selector) || partitionUpdate { log.Debugf("Enqueueing Old Service: %v %v", svc, getClusterLog(clusterName)) key := &rqKey{ namespace: svc.ObjectMeta.Namespace, diff --git a/pkg/controller/informers_test.go b/pkg/controller/informers_test.go index e2e1f07f8..087ae8d7d 100644 --- a/pkg/controller/informers_test.go +++ b/pkg/controller/informers_test.go @@ -174,7 +174,7 @@ var 
_ = Describe("Informers Tests", func() { VirtualServerAddress: "5.6.7.8", SNAT: "none", }) - updatedStatusVS.Status.StatusOk = "OK" + updatedStatusVS.Status.Status = StatusOk mockCtlr.enqueueUpdatedVirtualServer(updatedVS2, updatedStatusVS) Expect(mockCtlr.resourceQueue.Len()).To(Equal(0), "VS status update should be skipped") @@ -259,7 +259,7 @@ var _ = Describe("Informers Tests", func() { // Verify TS status update event is not queued for processing queueLen := mockCtlr.resourceQueue.Len() updatedStatusTS := tsWithPartition.DeepCopy() - updatedStatusTS.Status.StatusOk = "Ok" + updatedStatusTS.Status.Status = StatusOk mockCtlr.enqueueUpdatedTransportServer(tsWithPartition, updatedStatusTS) Expect(mockCtlr.resourceQueue.Len()).To(Equal(queueLen), "TS status update should be skipped") diff --git a/pkg/controller/multiClusterInformers.go b/pkg/controller/multiClusterInformers.go index 2d6614203..618ac5009 100644 --- a/pkg/controller/multiClusterInformers.go +++ b/pkg/controller/multiClusterInformers.go @@ -284,6 +284,13 @@ func (ctlr *Controller) setupAndStartHAClusterInformers(clusterName string) erro // updateMultiClusterInformers starts/stops the informers for the given namespace for external clusters including HA peer cluster func (ctlr *Controller) updateMultiClusterInformers(namespace string, startInformer bool) error { for clusterName, config := range ctlr.multiClusterConfigs.ClusterConfigs { + // For local cluster maintain some placeholder value, as the informers are already maintained in the controller object + if clusterName == "" { + if ctlr.multiClusterPoolInformers[""] == nil { + ctlr.multiClusterPoolInformers[""] = make(map[string]*MultiClusterPoolInformer) + } + return nil + } restClient := config.KubeClient.CoreV1().RESTClient() // Setup informer with the namespace if err := ctlr.addMultiClusterNamespacedInformers(clusterName, namespace, restClient, startInformer); err != nil { diff --git a/pkg/controller/postManager.go b/pkg/controller/postManager.go index cfaab151d..848ed0ca2 100644 --- a/pkg/controller/postManager.go +++ b/pkg/controller/postManager.go @@ -204,13 +204,13 @@ func (postMgr *PostManager) httpPOST(request *http.Request) (*http.Response, map return httpResp, response } -func (postMgr *PostManager) updateTenantResponseCode(code int, id string, tenant string, isDeleted bool) { +func (postMgr *PostManager) updateTenantResponseCode(code int, id string, tenant string, isDeleted bool, message string) { // Update status for a specific tenant if mentioned, else update the response for all tenants if tenant != "" { - postMgr.tenantResponseMap[tenant] = tenantResponse{code, id, isDeleted} + postMgr.tenantResponseMap[tenant] = tenantResponse{code, id, isDeleted, message} } else { for tenant := range postMgr.tenantResponseMap { - postMgr.tenantResponseMap[tenant] = tenantResponse{code, id, false} + postMgr.tenantResponseMap[tenant] = tenantResponse{code, id, false, message} } } } @@ -227,7 +227,7 @@ func (postMgr *PostManager) handleResponseStatusOK(responseMap map[string]interf tenant, ok2 := v["tenant"].(string) if ok1 && ok2 { log.Debugf("[AS3]%v Response from BIG-IP: code: %v --- tenant:%v --- message: %v", postMgr.postManagerPrefix, v["code"], v["tenant"], v["message"]) - postMgr.updateTenantResponseCode(int(code), "", tenant, updateTenantDeletion(tenant, declaration)) + postMgr.updateTenantResponseCode(int(code), "", tenant, updateTenantDeletion(tenant, declaration), "") } else { unknownResponse = true } @@ -275,7 +275,7 @@ func (postMgr *PostManager) 
getTenantConfigStatus(id string) { return } // reset task id, so that any unknownResponse failed will go to post call in the next retry - postMgr.updateTenantResponseCode(int(code), "", tenant, updateTenantDeletion(tenant, declaration)) + postMgr.updateTenantResponseCode(int(code), "", tenant, updateTenantDeletion(tenant, declaration), "") if _, ok := v["response"]; ok { log.Debugf("[AS3]%v Response from BIG-IP: code: %v --- tenant:%v --- message: %v %v", postMgr.postManagerPrefix, v["code"], v["tenant"], v["message"], v["response"]) } else { @@ -297,7 +297,7 @@ func (postMgr *PostManager) getTenantConfigStatus(id string) { } } else if httpResp.StatusCode != http.StatusServiceUnavailable { // reset task id, so that any failed tenants will go to post call in the next retry - postMgr.updateTenantResponseCode(httpResp.StatusCode, "", "", false) + postMgr.updateTenantResponseCode(httpResp.StatusCode, "", "", false, "") } if !postMgr.LogAS3Response && unknownResponse { postMgr.logAS3Response(responseMap) @@ -315,10 +315,10 @@ func (postMgr *PostManager) handleMultiStatus(responseMap map[string]interface{} tenant, ok2 := v["tenant"].(string) if ok1 && ok2 { if code != 200 { - postMgr.updateTenantResponseCode(int(code), "", tenant, false) + postMgr.updateTenantResponseCode(int(code), "", tenant, false, fmt.Sprintf("Big-IP Responded with error code: %v -- verify the logs for detailed error", v["code"])) log.Errorf("%v[AS3]%v Error response from BIG-IP: code: %v --- tenant:%v --- message: %v", getRequestPrefix(id), postMgr.postManagerPrefix, v["code"], v["tenant"], v["message"]) } else { - postMgr.updateTenantResponseCode(int(code), "", tenant, updateTenantDeletion(tenant, declaration)) + postMgr.updateTenantResponseCode(int(code), "", tenant, updateTenantDeletion(tenant, declaration), "") log.Debugf("[AS3]%v Response from BIG-IP: code: %v --- tenant:%v --- message: %v", postMgr.postManagerPrefix, v["code"], v["tenant"], v["message"]) } } else { @@ -339,7 +339,7 @@ func (postMgr *PostManager) handleMultiStatus(responseMap map[string]interface{} func (postMgr *PostManager) handleResponseAccepted(responseMap map[string]interface{}) { // traverse all response results if respId, ok := (responseMap["id"]).(string); ok { - postMgr.updateTenantResponseCode(http.StatusAccepted, respId, "", false) + postMgr.updateTenantResponseCode(http.StatusAccepted, respId, "", false, "") log.Debugf("[AS3]%v Response from BIG-IP: code 201 id %v, waiting %v seconds to poll response", postMgr.postManagerPrefix, respId, timeoutMedium) } else { postMgr.logAS3Response(responseMap) @@ -347,30 +347,36 @@ func (postMgr *PostManager) handleResponseAccepted(responseMap map[string]interf } func (postMgr *PostManager) handleResponseStatusServiceUnavailable(responseMap map[string]interface{}, id int) { + var message string if err, ok := (responseMap["error"]).(map[string]interface{}); ok { log.Errorf("%v[AS3]%v Big-IP Responded with error code: %v", getRequestPrefix(id), postMgr.postManagerPrefix, err["code"]) + message = fmt.Sprintf("Big-IP Responded with error code: %v -- verify the logs for detailed error", err["code"]) } else { postMgr.logAS3Response(responseMap) } log.Debugf("[AS3]%v Response from BIG-IP: BIG-IP is busy, waiting %v seconds and re-posting the declaration", postMgr.postManagerPrefix, timeoutMedium) - postMgr.updateTenantResponseCode(http.StatusServiceUnavailable, "", "", false) + postMgr.updateTenantResponseCode(http.StatusServiceUnavailable, "", "", false, message) } func (postMgr *PostManager) 
handleResponseStatusNotFound(responseMap map[string]interface{}, id int) { var unknownResponse bool + var message string if err, ok := (responseMap["error"]).(map[string]interface{}); ok { log.Errorf("%v[AS3]%v Big-IP Responded with error code: %v", getRequestPrefix(id), postMgr.postManagerPrefix, err["code"]) + message = fmt.Sprintf("Big-IP Responded with error code: %v -- verify the logs for detailed error", err["code"]) } else { unknownResponse = true + message = "Big-IP Responded with error -- verify the logs for detailed error" } if postMgr.LogAS3Response || unknownResponse { postMgr.logAS3Response(responseMap) } - postMgr.updateTenantResponseCode(http.StatusNotFound, "", "", false) + postMgr.updateTenantResponseCode(http.StatusNotFound, "", "", false, message) } func (postMgr *PostManager) handleResponseStatusUnAuthorized(responseMap map[string]interface{}, id int) { var unknownResponse bool + var message string if _, ok := responseMap["code"].(float64); ok { if _, ok := responseMap["message"].(string); ok { log.Errorf("%v[AS3]%v authentication failed,"+ @@ -379,14 +385,16 @@ func (postMgr *PostManager) handleResponseStatusUnAuthorized(responseMap map[str log.Errorf("%v[AS3]%v authentication failed,"+ " Error response from BIGIP with status code: 401", getRequestPrefix(id), postMgr.postManagerPrefix) } + message = "authentication failed, Error response from BIGIP with status code: 401 -- verify the logs for detailed error" } else { unknownResponse = true + message = "Big-IP Responded with error -- verify the logs for detailed error" } if postMgr.LogAS3Response || unknownResponse { postMgr.logAS3Response(responseMap) } - postMgr.updateTenantResponseCode(http.StatusUnauthorized, "", "", false) + postMgr.updateTenantResponseCode(http.StatusUnauthorized, "", "", false, message) } func (postMgr *PostManager) handleResponseOthers(responseMap map[string]interface{}, id int) { @@ -398,26 +406,25 @@ func (postMgr *PostManager) handleResponseOthers(responseMap map[string]interfac tenant, ok2 := v["tenant"].(string) if ok1 && ok2 { log.Errorf("%v[AS3]%v Response from BIG-IP: code: %v --- tenant:%v --- message: %v", getRequestPrefix(id), postMgr.postManagerPrefix, v["code"], v["tenant"], v["message"]) - postMgr.updateTenantResponseCode(int(code), "", tenant, false) + postMgr.updateTenantResponseCode(int(code), "", tenant, false, fmt.Sprintf("Big-IP Responded with error code: %v -- verify the logs for detailed error", code)) } else { unknownResponse = true } } else { unknownResponse = true } - } } else if err, ok := (responseMap["error"]).(map[string]interface{}); ok { log.Errorf("%v[AS3]%v Big-IP Responded with error code: %v", getRequestPrefix(id), postMgr.postManagerPrefix, err["code"]) if code, ok := err["code"].(float64); ok { - postMgr.updateTenantResponseCode(int(code), "", "", false) + postMgr.updateTenantResponseCode(int(code), "", "", false, fmt.Sprintf("Big-IP Responded with error code: %v -- verify the logs for detailed error", err["code"])) } else { unknownResponse = true } } else { unknownResponse = true if code, ok := responseMap["code"].(float64); ok { - postMgr.updateTenantResponseCode(int(code), "", "", false) + postMgr.updateTenantResponseCode(int(code), "", "", false, fmt.Sprintf("Big-IP Responded with error code: %v -- verify the logs for detailed error", code)) } } if postMgr.LogAS3Response || unknownResponse { @@ -775,7 +782,7 @@ func (postMgr *PostManager) updateRetryMap(tenant string, resp tenantResponse, t } else { postMgr.retryTenantDeclMap[tenant] = &tenantParams{ 
tenDecl, - tenantResponse{resp.agentResponseCode, resp.taskId, false}, + tenantResponse{resp.agentResponseCode, resp.taskId, false, resp.message}, } } } diff --git a/pkg/controller/responseHandler.go b/pkg/controller/responseHandler.go index 1816c0c37..d4761228b 100644 --- a/pkg/controller/responseHandler.go +++ b/pkg/controller/responseHandler.go @@ -2,6 +2,7 @@ package controller import ( "container/list" + "errors" ficV1 "github.com/F5Networks/f5-ipam-controller/pkg/ipamapis/apis/fic/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "strings" @@ -75,9 +76,12 @@ func (ctlr *Controller) responseHandler(respChan chan resourceStatusMeta) { } virtual := obj.(*cisapiv1.VirtualServer) if virtual.Namespace+"/"+virtual.Name == rscKey { - if _, found := rscUpdateMeta.failedTenants[partition]; !found { + if tenantResponse, found := rscUpdateMeta.failedTenants[partition]; found { + // update the status for virtual server as tenant posting is failed + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, errors.New(tenantResponse.message)) + } else { // update the status for virtual server as tenant posting is success - ctlr.updateVirtualServerStatus(virtual, virtual.Status.VSAddress, "Ok") + ctlr.updateResourceStatus(VirtualServer, virtual, virtual.Status.VSAddress, StatusOk, nil) // Update Corresponding Service Status of Type LB for _, pool := range virtual.Spec.Pools { var svcNamespace string @@ -114,9 +118,12 @@ func (ctlr *Controller) responseHandler(respChan chan resourceStatusMeta) { } virtual := obj.(*cisapiv1.TransportServer) if virtual.Namespace+"/"+virtual.Name == rscKey { - if _, found := rscUpdateMeta.failedTenants[partition]; !found { + if tenantResponse, found := rscUpdateMeta.failedTenants[partition]; found { + // update the status for transport server as tenant posting is failed + ctlr.updateResourceStatus(TransportServer, virtual, "", StatusError, errors.New(tenantResponse.message)) + } else { // update the status for transport server as tenant posting is success - ctlr.updateTransportServerStatus(virtual, virtual.Status.VSAddress, "Ok") + ctlr.updateResourceStatus(TransportServer, virtual, virtual.Status.VSAddress, StatusOk, nil) // Update Corresponding Service Status of Type LB var svcNamespace string if virtual.Spec.Pool.ServiceNamespace != "" { @@ -132,6 +139,34 @@ func (ctlr *Controller) responseHandler(respChan chan resourceStatusMeta) { } } } + + case IngressLink: + // update status + crInf, ok := ctlr.getNamespacedCRInformer(ns) + if !ok { + log.Debugf("IngressLink Informer not found for namespace: %v", ns) + continue + } + obj, exist, err := crInf.ilInformer.GetIndexer().GetByKey(rscKey) + if err != nil { + log.Debugf("Could not fetch IngressLink: %v: %v", rscKey, err) + continue + } + if !exist { + log.Debugf("IngressLink Not Found: %v", rscKey) + continue + } + il := obj.(*cisapiv1.IngressLink) + if il.Namespace+"/"+il.Name == rscKey { + if tenantResponse, found := rscUpdateMeta.failedTenants[partition]; found { + // update the status for ingresslink as tenant posting is failed + ctlr.updateResourceStatus(IngressLink, il, "", StatusError, errors.New(tenantResponse.message)) + } else { + // update the status for ingresslink as tenant posting is success + ctlr.updateResourceStatus(IngressLink, il, il.Status.VSAddress, StatusOk, nil) + } + } + case Route: if _, found := rscUpdateMeta.failedTenants[partition]; found { // TODO : distinguish between a 503 and an actual failure diff --git a/pkg/controller/types.go b/pkg/controller/types.go index 
007ca18dd..1d81718dd 100644 --- a/pkg/controller/types.go +++ b/pkg/controller/types.go @@ -417,7 +417,7 @@ type ( resourceStatusMeta struct { id int - failedTenants map[string]struct{} + failedTenants map[string]tenantResponse } resourceRef struct { @@ -842,6 +842,7 @@ type ( agentResponseCode int taskId string isDeleted bool + message string } tenantParams struct { @@ -1387,6 +1388,12 @@ const ( ) type HAModeType string + +const ( + StatusOk = "OK" + StatusError = "ERROR" +) + type AutoMonitorType string const ( diff --git a/pkg/controller/validate.go b/pkg/controller/validate.go index bb6841359..e755e97e9 100644 --- a/pkg/controller/validate.go +++ b/pkg/controller/validate.go @@ -17,6 +17,7 @@ package controller import ( + "errors" "fmt" cisapiv1 "github.com/F5Networks/k8s-bigip-ctlr/v2/config/apis/cis/v1" log "github.com/F5Networks/k8s-bigip-ctlr/v2/pkg/vlogger" @@ -30,28 +31,37 @@ func (ctlr *Controller) checkValidVirtualServer( vsNamespace := vsResource.ObjectMeta.Namespace vsName := vsResource.ObjectMeta.Name vkey := fmt.Sprintf("%s/%s", vsNamespace, vsName) + var err string crInf, ok := ctlr.getNamespacedCRInformer(vsNamespace) if !ok { - log.Errorf("%v Informer not found for namespace: %v", ctlr.getMultiClusterLog(), vsNamespace) + err = fmt.Sprintf("%v Informer not found for namespace: %v", ctlr.getMultiClusterLog(), vsNamespace) + log.Errorf(err) + ctlr.updateResourceStatus(VirtualServer, vsResource, "", StatusError, errors.New(err)) return false } // Check if the virtual exists and valid for us. _, virtualFound, _ := crInf.vsInformer.GetIndexer().GetByKey(vkey) if !virtualFound { - log.Infof("VirtualServer %s is invalid", vsName) + err = fmt.Sprintf("VirtualServer %s is invalid", vsName) + log.Errorf(err) + ctlr.updateResourceStatus(VirtualServer, vsResource, "", StatusError, errors.New(err)) return false } // Check if Partition is set as Common if vsResource.Spec.Partition == CommonPartition { - log.Errorf("VirtualServer %s cannot be created in Common partition", vsName) + err = fmt.Sprintf("VirtualServer %s cannot be created in Common partition", vsName) + log.Errorf(err) + ctlr.updateResourceStatus(VirtualServer, vsResource, "", StatusError, errors.New(err)) return false } // Check if HTTPTraffic is set for insecure VS if vsResource.Spec.TLSProfileName == "" && vsResource.Spec.HTTPTraffic != "" { - log.Errorf("HTTPTraffic not allowed to be set for insecure VirtualServer: %v", vsName) + err = fmt.Sprintf("HTTPTraffic not allowed to be set for insecure VirtualServer: %v", vsName) + log.Errorf(err) + ctlr.updateResourceStatus(VirtualServer, vsResource, "", StatusError, errors.New(err)) return false } @@ -61,13 +71,17 @@ func (ctlr *Controller) checkValidVirtualServer( // This ensures that pool-only mode only logs the message below the first // time we see a config. 
if bindAddr == "" { - log.Infof("No IP was specified for the virtual server %s", vsName) + err = fmt.Sprintf("No IP was specified for the virtual server %s", vsName) + log.Errorf(err) + ctlr.updateResourceStatus(VirtualServer, vsResource, "", StatusError, errors.New(err)) return false } } else { ipamLabel := vsResource.Spec.IPAMLabel if ipamLabel == "" && bindAddr == "" { - log.Infof("No ipamLabel was specified for the virtual server %s", vsName) + err = fmt.Sprintf("No ipamLabel was specified for the virtual server %s", vsName) + log.Errorf(err) + ctlr.updateResourceStatus(VirtualServer, vsResource, "", StatusError, errors.New(err)) return false } } @@ -95,22 +109,29 @@ func (ctlr *Controller) checkValidTransportServer( vsNamespace := tsResource.ObjectMeta.Namespace vsName := tsResource.ObjectMeta.Name vkey := fmt.Sprintf("%s/%s", vsNamespace, vsName) + var err string crInf, ok := ctlr.getNamespacedCRInformer(vsNamespace) if !ok { - log.Errorf("%v Informer not found for namespace: %v", ctlr.getMultiClusterLog(), vsNamespace) + err = fmt.Sprintf("%v Informer not found for namespace: %v", ctlr.getMultiClusterLog(), vsNamespace) + log.Errorf(err) + ctlr.updateResourceStatus(TransportServer, tsResource, "", StatusError, errors.New(err)) return false } // Check if the virtual exists and valid for us. _, virtualFound, _ := crInf.tsInformer.GetIndexer().GetByKey(vkey) if !virtualFound { - log.Infof("TransportServer %s is invalid", vsName) + err = fmt.Sprintf("TransportServer %s is invalid", vsName) + log.Errorf(err) + ctlr.updateResourceStatus(TransportServer, tsResource, "", StatusError, errors.New(err)) return false } // Check if Partition is set as Common if tsResource.Spec.Partition == CommonPartition { - log.Errorf("TransportServer %s cannot be created in Common partition", vsName) + err = fmt.Sprintf("TransportServer %s cannot be created in Common partition", vsName) + log.Errorf(err) + ctlr.updateResourceStatus(TransportServer, tsResource, "", StatusError, errors.New(err)) return false } @@ -120,13 +141,17 @@ func (ctlr *Controller) checkValidTransportServer( // This ensures that pool-only mode only logs the message below the first // time we see a config. if bindAddr == "" { - log.Infof("No IP was specified for the transport server %s", vsName) + err = fmt.Sprintf("No IP was specified for the transport server %s", vsName) + log.Errorf(err) + ctlr.updateResourceStatus(TransportServer, tsResource, "", StatusError, errors.New(err)) return false } } else { ipamLabel := tsResource.Spec.IPAMLabel if ipamLabel == "" && bindAddr == "" { - log.Infof("No ipamLabel was specified for the transport server %s", vsName) + err = fmt.Sprintf("No ipamLabel was specified for the transport server %s", vsName) + log.Errorf(err) + ctlr.updateResourceStatus(TransportServer, tsResource, "", StatusError, errors.New(err)) return false } } @@ -134,7 +159,9 @@ func (ctlr *Controller) checkValidTransportServer( if tsResource.Spec.Type == "" { tsResource.Spec.Type = "tcp" } else if !(tsResource.Spec.Type == "udp" || tsResource.Spec.Type == "tcp" || tsResource.Spec.Type == "sctp") { - log.Errorf("Invalid type value for transport server %s. Supported values are tcp, udp and sctp only", vsName) + err = fmt.Sprintf("Invalid type value for transport server %s. 
Supported values are tcp, udp and sctp only", vsName) + log.Errorf(err) + ctlr.updateResourceStatus(TransportServer, tsResource, "", StatusError, errors.New(err)) return false } if tsResource.Spec.Pool.MultiClusterServices != nil { @@ -157,22 +184,29 @@ func (ctlr *Controller) checkValidIngressLink( ilNamespace := il.ObjectMeta.Namespace ilName := il.ObjectMeta.Name ilkey := fmt.Sprintf("%s/%s", ilNamespace, ilName) + var err string crInf, ok := ctlr.getNamespacedCRInformer(ilNamespace) if !ok { - log.Errorf("%v Informer not found for namespace: %v", ctlr.getMultiClusterLog(), ilNamespace) + err = fmt.Sprintf("%v Informer not found for namespace: %v", ctlr.getMultiClusterLog(), ilNamespace) + log.Errorf(err) + ctlr.updateResourceStatus(IngressLink, il, "", StatusError, errors.New(err)) return false } // Check if the virtual exists and valid for us. _, virtualFound, _ := crInf.ilInformer.GetIndexer().GetByKey(ilkey) if !virtualFound { - log.Infof("IngressLink %s is invalid", ilName) + err = fmt.Sprintf("IngressLink %s is invalid", ilName) + log.Errorf(err) + ctlr.updateResourceStatus(IngressLink, il, "", StatusError, errors.New(err)) return false } // Check if Partition is set as Common if il.Spec.Partition == CommonPartition { - log.Errorf("IngressLink %s cannot be created in Common partition", ilName) + err = fmt.Sprintf("IngressLink %s cannot be created in Common partition", ilName) + log.Errorf(err) + ctlr.updateResourceStatus(IngressLink, il, "", StatusError, errors.New(err)) return false } @@ -180,13 +214,17 @@ func (ctlr *Controller) checkValidIngressLink( if ctlr.ipamCli == nil { if bindAddr == "" { - log.Infof("No IP was specified for ingresslink %s", ilName) + err = fmt.Sprintf("No IP was specified for ingresslink %s", ilName) + log.Errorf(err) + ctlr.updateResourceStatus(IngressLink, il, "", StatusError, errors.New(err)) return false } } else { ipamLabel := il.Spec.IPAMLabel if ipamLabel == "" && bindAddr == "" { - log.Infof("No ipamLabel was specified for the il server %s", ilName) + err = fmt.Sprintf("No ipamLabel was specified for the il server %s", ilName) + log.Errorf(err) + ctlr.updateResourceStatus(IngressLink, il, "", StatusError, errors.New(err)) return false } } diff --git a/pkg/controller/worker.go b/pkg/controller/worker.go index 7bb42f2bb..b85e419a9 100644 --- a/pkg/controller/worker.go +++ b/pkg/controller/worker.go @@ -21,6 +21,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + "errors" "fmt" "os" "reflect" @@ -635,6 +636,18 @@ func (ctlr *Controller) processResources() bool { default: if rscDelete { for _, vrt := range ctlr.getAllVirtualServers(nsName) { + // Cleanup processedNativeResources cache this will ensure this resource can be process again on create event + rscRefKey := resourceRef{ + kind: VirtualServer, + name: vrt.Name, + namespace: vrt.Namespace, + } + if _, ok := ctlr.resources.processedNativeResources[rscRefKey]; ok { + // Remove resource key from processedNativeResources on delete event + delete(ctlr.resources.processedNativeResources, rscRefKey) + } + // update the poolMem cache, clusterSvcResource & resource-svc maps + ctlr.deleteResourceExternalClusterSvcRouteReference(rscRefKey) err := ctlr.processVirtualServers(vrt, true) if err != nil { // TODO @@ -644,6 +657,18 @@ func (ctlr *Controller) processResources() bool { } for _, ts := range ctlr.getAllTransportServers(nsName) { + // Cleanup processedNativeResources cache this will ensure this resource can be process again on create event + rscRefKey := resourceRef{ + kind: TransportServer, 
+ name: ts.Name, + namespace: ts.Namespace, + } + if _, ok := ctlr.resources.processedNativeResources[rscRefKey]; ok { + // Remove resource key from processedNativeResources on delete event + delete(ctlr.resources.processedNativeResources, rscRefKey) + } + // update the poolMem cache, clusterSvcResource & resource-svc maps + ctlr.deleteResourceExternalClusterSvcRouteReference(rscRefKey) err := ctlr.processTransportServers(ts, true) if err != nil { // TODO @@ -1130,6 +1155,7 @@ func (ctlr *Controller) processVirtualServers( var ip string var status int + var altErr string partition := ctlr.getCRPartition(virtual.Spec.Partition) if ctlr.ipamCli != nil { if isVSDeleted && len(virtuals) == 0 && virtual.Spec.VirtualServerAddress == "" { @@ -1158,35 +1184,52 @@ func (ctlr *Controller) processVirtualServers( switch status { case NotEnabled: - log.Debug("IPAM Custom Resource Not Available") + altErr = "[IPAM] IPAM Custom Resource Not Available" + log.Error(altErr) + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, errors.New(altErr)) return nil case InvalidInput: - log.Debugf("IPAM Invalid IPAM Label: %v for Virtual Server: %s/%s", ipamLabel, virtual.Namespace, virtual.Name) + altErr = fmt.Sprintf("IPAM Invalid IPAM Label: %v for Virtual Server: %s/%s", ipamLabel, virtual.Namespace, virtual.Name) + log.Error(altErr) + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, errors.New(altErr)) return nil case NotRequested: - return fmt.Errorf("unable make do IPAM Request, will be re-requested soon") + altErr = "unable to make IPAM Request, will be re-requested soon" + log.Error(altErr) + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, errors.New(altErr)) + return fmt.Errorf("%s", altErr) case Requested: - log.Debugf("IP address requested for service: %s/%s", virtual.Namespace, virtual.Name) + altErr = fmt.Sprintf("IP address requested for service: %s/%s", virtual.Namespace, virtual.Name) + log.Error(altErr) + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, errors.New(altErr)) return nil } } } else { if virtual.Spec.HostGroup == "" { if virtual.Spec.VirtualServerAddress == "" { - return fmt.Errorf("No VirtualServer address or IPAM found.") + altErr = "no VirtualServer address or IPAM found" + log.Error(altErr) + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, errors.New(altErr)) + return fmt.Errorf("%s", altErr) } ip = virtual.Spec.VirtualServerAddress } else { var err error ip, err = getVirtualServerAddress(virtuals) if err != nil { - log.Errorf("Error in virtualserver address: %s", err.Error()) + altErr = fmt.Sprintf("Error in virtualserver address: %s", err.Error()) + log.Errorf(altErr) + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, errors.New(altErr)) return err } if ip == "" { ip = virtual.Spec.VirtualServerAddress if ip == "" { - return fmt.Errorf("No VirtualServer address found for: %s", virtual.Name) + altErr = fmt.Sprintf("No VirtualServer address found for: %s", virtual.Name) + log.Errorf(altErr) + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, errors.New(altErr)) + return fmt.Errorf(altErr) } } } @@ -1284,12 +1327,14 @@ func (ctlr *Controller) processVirtualServers( err := ctlr.handleVSResourceConfigForPolicy(rsCfg, plc) if err != nil { processingError = true + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, err) break } } if err != nil { processingError = true log.Errorf("%v", err) + ctlr.updateResourceStatus(VirtualServer, virtual, "", 
StatusError, err) break } @@ -1323,6 +1368,7 @@ func (ctlr *Controller) processVirtualServers( // Processing failed // Stop processing further virtuals processingError = true + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, errors.New("TLS profile not found for the Virtual Server")) break } else { tlsTermination = tlsProf.Spec.TLS.Termination @@ -1343,6 +1389,7 @@ func (ctlr *Controller) processVirtualServers( ) if err != nil { processingError = true + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, err) break } // handle pool settings from policy cr @@ -1351,6 +1398,7 @@ func (ctlr *Controller) processVirtualServers( err := ctlr.handlePoolResourceConfigForPolicy(rsCfg, plc) if err != nil { processingError = true + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, err) break } } @@ -1372,6 +1420,7 @@ func (ctlr *Controller) processVirtualServers( // Processing failed // Stop processing further virtuals processingError = true + ctlr.updateResourceStatus(VirtualServer, virtual, "", StatusError, errors.New("error while handling TLS Virtual Server")) break } @@ -1473,6 +1522,7 @@ func (ctlr *Controller) getAssociatedVirtualServers( // that particular VirtualServer will be skipped. var virtuals []*cisapiv1.VirtualServer + var err string // {hostname: {path: }} uniqueHostPathMap := make(map[string]map[string]struct{}) currentVSPartition := ctlr.getCRPartition(currentVS.Spec.Partition) @@ -1488,14 +1538,18 @@ func (ctlr *Controller) getAssociatedVirtualServers( if currentVS.Spec.VirtualServerAddress != "" && currentVS.Spec.VirtualServerAddress == vrt.Spec.VirtualServerAddress && currentVSPartition != ctlr.getCRPartition(vrt.Spec.Partition) { - log.Errorf("Multiple Virtual Servers %v,%v are configured with same VirtualServerAddress : %v with different partitions", currentVS.Name, vrt.Name, vrt.Spec.VirtualServerAddress) + err = fmt.Sprintf("Multiple Virtual Servers %v,%v are configured with same VirtualServerAddress : %v with different partitions", currentVS.Name, vrt.Name, vrt.Spec.VirtualServerAddress) + log.Error(err) + ctlr.updateResourceStatus(VirtualServer, currentVS, "", StatusError, errors.New(err)) return nil } // skip the virtuals in other HostGroups if vrt.Spec.HostGroup != currentVS.Spec.HostGroup { if currentVS.Spec.VirtualServerAddress != "" && vrt.Spec.VirtualServerAddress != "" && currentVS.Spec.VirtualServerAddress == vrt.Spec.VirtualServerAddress { - log.Errorf("Multiple Virtual Servers %v, %v are configured with same VirtualServerAddress: %v", currentVS.Name, vrt.Name, vrt.Spec.VirtualServerAddress) + err = fmt.Sprintf("Multiple Virtual Servers %v, %v are configured with same VirtualServerAddress: %v", currentVS.Name, vrt.Name, vrt.Spec.VirtualServerAddress) + log.Error(err) + ctlr.updateResourceStatus(VirtualServer, currentVS, "", StatusError, errors.New(err)) return nil } continue @@ -1503,17 +1557,23 @@ func (ctlr *Controller) getAssociatedVirtualServers( if vrt.Spec.HostGroup != "" && currentVS.Spec.HostGroup != "" && vrt.Spec.HostGroup == currentVS.Spec.HostGroup { if currentVS.Spec.VirtualServerAddress != "" && vrt.Spec.VirtualServerAddress != "" && currentVS.Spec.VirtualServerAddress != vrt.Spec.VirtualServerAddress { - log.Errorf("Multiple Virtual Servers %v, %v are configured with different VirtualServerAddress: %v %v", currentVS.Name, vrt.Name, currentVS.Spec.VirtualServerAddress, vrt.Spec.VirtualServerAddress) + err = fmt.Sprintf("Multiple Virtual Servers %v, %v are configured with different VirtualServerAddress: 
%v %v", currentVS.Name, vrt.Name, currentVS.Spec.VirtualServerAddress, vrt.Spec.VirtualServerAddress) + log.Error(err) + ctlr.updateResourceStatus(VirtualServer, currentVS, "", StatusError, errors.New(err)) return nil } if currentVS.Spec.IPAMLabel != "" && vrt.Spec.IPAMLabel != "" && currentVS.Spec.IPAMLabel != vrt.Spec.IPAMLabel { - log.Errorf("Multiple Virtual Servers %v, %v are configured with different IPAM Labels: %v %v", currentVS.Name, vrt.Name, currentVS.Spec.IPAMLabel, vrt.Spec.IPAMLabel) + err = fmt.Sprintf("Multiple Virtual Servers %v, %v are configured with different IPAM Labels: %v %v", currentVS.Name, vrt.Name, currentVS.Spec.IPAMLabel, vrt.Spec.IPAMLabel) + log.Error(err) + ctlr.updateResourceStatus(VirtualServer, currentVS, "", StatusError, errors.New(err)) return nil } } if currentVS.Spec.HostGroup != "" && vrt.Spec.HostGroup == currentVS.Spec.HostGroup && vrt.Spec.HostGroupVirtualServerName != currentVS.Spec.HostGroupVirtualServerName { - log.Errorf("Same host %v is configured with different HostGroupVirtualServerNames : %v ", vrt.Spec.HostGroup, vrt.Spec.HostGroupVirtualServerName) + err = fmt.Sprintf("Same host %v is configured with different HostGroupVirtualServerNames : %v ", vrt.Spec.HostGroup, vrt.Spec.HostGroupVirtualServerName) + log.Error(err) + ctlr.updateResourceStatus(VirtualServer, currentVS, "", StatusError, errors.New(err)) return nil } @@ -1537,7 +1597,9 @@ func (ctlr *Controller) getAssociatedVirtualServers( // Same host with different VirtualServerAddress is invalid if vrt.Spec.VirtualServerAddress != currentVS.Spec.VirtualServerAddress { if vrt.Spec.Host != "" && vrt.Spec.Host == currentVS.Spec.Host { - log.Errorf("Same host %v is configured with different VirtualServerAddress : %v ", vrt.Spec.Host, vrt.Spec.VirtualServerName) + err = fmt.Sprintf("Same host %v is configured with different VirtualServerAddress : %v ", vrt.Spec.Host, vrt.Spec.VirtualServerName) + log.Error(err) + ctlr.updateResourceStatus(VirtualServer, currentVS, "", StatusError, errors.New(err)) return nil } // In case of empty host name or host names not matching, skip the virtual with other VirtualServerAddress @@ -1546,7 +1608,9 @@ func (ctlr *Controller) getAssociatedVirtualServers( //with additonalVirtualServerAddresses, skip the virtuals if ip list doesn't match if !reflect.DeepEqual(currentVS.Spec.AdditionalVirtualServerAddresses, vrt.Spec.AdditionalVirtualServerAddresses) { if vrt.Spec.Host != "" { - log.Errorf("Same host %v is configured with different AdditionalVirtualServerAddress : %v ", vrt.Spec.Host, vrt.ObjectMeta.Name) + err = fmt.Sprintf("Same host %v is configured with different AdditionalVirtualServerAddress : %v ", vrt.Spec.Host, vrt.ObjectMeta.Name) + log.Error(err) + ctlr.updateResourceStatus(VirtualServer, currentVS, "", StatusError, errors.New(err)) return nil } // In case of empty host name, skip the virtual with other AdditionalVirtualServerAddress @@ -1556,19 +1620,25 @@ func (ctlr *Controller) getAssociatedVirtualServers( if ctlr.ipamCli != nil { if currentVS.Spec.HostGroup == "" && vrt.Spec.IPAMLabel != currentVS.Spec.IPAMLabel { - log.Errorf("Same host %v is configured with different IPAM labels: %v, %v. Unable to process %v", vrt.Spec.Host, vrt.Spec.IPAMLabel, currentVS.Spec.IPAMLabel, currentVS.Name) + err = fmt.Sprintf("Same host %v is configured with different IPAM labels: %v, %v. 
Unable to process %v", vrt.Spec.Host, vrt.Spec.IPAMLabel, currentVS.Spec.IPAMLabel, currentVS.Name) + log.Error(err) + ctlr.updateResourceStatus(VirtualServer, currentVS, "", StatusError, errors.New(err)) return nil } // Empty host and hostGroup with IPAM label is invalid for a Virtual Server if vrt.Spec.IPAMLabel != "" && vrt.Spec.Host == "" && vrt.Spec.HostGroup == "" { - log.Errorf("Hostless VS %v is configured with IPAM label: %v and missing HostGroup", vrt.ObjectMeta.Name, vrt.Spec.IPAMLabel) + err = fmt.Sprintf("Hostless VS %v is configured with IPAM label: %v and missing HostGroup", vrt.ObjectMeta.Name, vrt.Spec.IPAMLabel) + log.Error(err) + ctlr.updateResourceStatus(VirtualServer, currentVS, "", StatusError, errors.New(err)) return nil } // Empty host with empty IPAM label is invalid if vrt.Spec.Host == "" && vrt.Spec.VirtualServerAddress == "" && len(vrt.Spec.AdditionalVirtualServerAddresses) == 0 { if vrt.Spec.IPAMLabel == "" && vrt.Spec.HostGroup != "" { - log.Errorf("Hostless VS %v is configured with missing IPAM label", vrt.ObjectMeta.Name) + err = fmt.Sprintf("Hostless VS %v is configured with missing IPAM label", vrt.ObjectMeta.Name) + log.Error(err) + ctlr.updateResourceStatus(VirtualServer, currentVS, "", StatusError, errors.New(err)) return nil } if vrt.Spec.IPAMLabel == "" { @@ -1632,8 +1702,10 @@ func (ctlr *Controller) validateTSWithSameVSAddress( if currentTS.Spec.VirtualServerAddress != "" && currentTS.Spec.VirtualServerAddress == vrt.Spec.VirtualServerAddress && currentTSPartition != ctlr.getCRPartition(vrt.Spec.Partition) { - log.Errorf("Multiple Transport Servers %v,%v are configured with same VirtualServerAddress : %v "+ + err := fmt.Errorf("Multiple Transport Servers %v,%v are configured with same VirtualServerAddress : %v "+ "with different partitions", currentTS.Name, vrt.Name, vrt.Spec.VirtualServerAddress) + log.Errorf("%s", err) + ctlr.updateResourceStatus(TransportServer, currentTS, "", StatusError, err) return false } } @@ -1654,8 +1726,10 @@ func (ctlr *Controller) validateILsWithSameVSAddress( if currentIL.Spec.VirtualServerAddress != "" && currentIL.Spec.VirtualServerAddress == vrt.Spec.VirtualServerAddress && currentILPartition != ctlr.getCRPartition(vrt.Spec.Partition) { - log.Errorf("Multiple Ingress Links %v,%v are configured with same VirtualServerAddress : %v "+ + err := fmt.Errorf("Multiple Ingress Links %v,%v are configured with same VirtualServerAddress : %v "+ "with different partitions", currentIL.Name, vrt.Name, vrt.Spec.VirtualServerAddress) + log.Errorf("%s", err) + ctlr.updateResourceStatus(IngressLink, currentIL, "", StatusError, err) return false } } @@ -2096,7 +2170,6 @@ func (ctlr *Controller) updatePoolMembersForService(svcKey MultiClusterServiceKe // update the poolMem cache, clusterSvcResource & resource-svc maps ctlr.deleteResourceExternalClusterSvcRouteReference(poolId.rsKey) ctlr.processRoutes(routeGroup, false) - return } case VirtualServer: var item interface{} @@ -2110,7 +2183,6 @@ func (ctlr *Controller) updatePoolMembersForService(svcKey MultiClusterServiceKe if found { _ = ctlr.processVirtualServers(virtual, false) } - return case TransportServer: var item interface{} inf, _ := ctlr.getNamespacedCRInformer(poolId.rsKey.namespace) @@ -2123,7 +2195,6 @@ func (ctlr *Controller) updatePoolMembersForService(svcKey MultiClusterServiceKe if found { _ = ctlr.processTransportServers(virtual, false) } - return case IngressLink: var item interface{} inf, _ := ctlr.getNamespacedCRInformer(poolId.rsKey.namespace) @@ -2782,6 +2853,7 @@ 
func (ctlr *Controller) processTransportServers(
     var ip string
     var key string
     var status int
+    var altErr string
     partition := ctlr.getCRPartition(virtual.Spec.Partition)
     key = ctlr.ipamClusterLabel + virtual.ObjectMeta.Namespace + "/" + virtual.ObjectMeta.Name + "_ts"
     if ctlr.ipamCli != nil {
@@ -2797,22 +2869,34 @@ func (ctlr *Controller) processTransportServers(
             switch status {
             case NotEnabled:
-                log.Debug("[IPAM] IPAM Custom Resource Not Available")
+                altErr = "[IPAM] IPAM Custom Resource Not Available"
+                log.Error(altErr)
+                ctlr.updateResourceStatus(TransportServer, virtual, "", StatusError, errors.New(altErr))
                 return nil
             case InvalidInput:
-                log.Debugf("[IPAM] IPAM Invalid IPAM Label: %v for Transport Server: %s/%s",
+                altErr = fmt.Sprintf("[IPAM] IPAM Invalid IPAM Label: %v for Transport Server: %s/%s",
                     virtual.Spec.IPAMLabel, virtual.Namespace, virtual.Name)
+                log.Error(altErr)
+                ctlr.updateResourceStatus(TransportServer, virtual, "", StatusError, errors.New(altErr))
                 return nil
             case NotRequested:
-                return fmt.Errorf("[IPAM] unable to make IPAM Request, will be re-requested soon")
+                altErr = "[IPAM] unable to make IPAM Request, will be re-requested soon"
+                log.Error(altErr)
+                ctlr.updateResourceStatus(TransportServer, virtual, "", StatusError, errors.New(altErr))
+                return fmt.Errorf("%s", altErr)
             case Requested:
-                log.Debugf("[IPAM] IP address requested for Transport Server: %s/%s", virtual.Namespace, virtual.Name)
+                altErr = fmt.Sprintf("[IPAM] IP address requested for Transport Server: %s/%s", virtual.Namespace, virtual.Name)
+                log.Error(altErr)
+                ctlr.updateResourceStatus(TransportServer, virtual, "", StatusError, errors.New(altErr))
                 return nil
             }
         }
     } else {
         if virtual.Spec.VirtualServerAddress == "" {
-            return fmt.Errorf("No VirtualServer address in TS or IPAM found.")
+            altErr = "no VirtualServer address in TS or IPAM found"
+            log.Error(altErr)
+            ctlr.updateResourceStatus(TransportServer, virtual, "", StatusError, errors.New(altErr))
+            return fmt.Errorf("%s", altErr)
         }
         ip = virtual.Spec.VirtualServerAddress
     }
@@ -2875,11 +2959,13 @@ func (ctlr *Controller) processTransportServers(
         err := ctlr.handleTSResourceConfigForPolicy(rsCfg, plc)
         if err != nil {
             log.Errorf("%v", err)
+            ctlr.updateResourceStatus(TransportServer, virtual, "", StatusError, err)
             return nil
         }
     }
     if err != nil {
         log.Errorf("%v", err)
+        ctlr.updateResourceStatus(TransportServer, virtual, "", StatusError, err)
         return nil
     }
@@ -2892,6 +2978,7 @@ func (ctlr *Controller) processTransportServers(
     )
     if err != nil {
         log.Errorf("Cannot Publish TransportServer %s", virtual.ObjectMeta.Name)
+        ctlr.updateResourceStatus(TransportServer, virtual, "", StatusError, err)
         return nil
     }
     // handle pool settings from policy cr
@@ -2899,10 +2986,9 @@ func (ctlr *Controller) processTransportServers(
         if plc.Spec.PoolSettings != (cisapiv1.PoolSettingsSpec{}) {
             err := ctlr.handlePoolResourceConfigForPolicy(rsCfg, plc)
             if err != nil {
-                if err != nil {
-                    log.Errorf("%v", err)
-                    return nil
-                }
+                log.Errorf("%v", err)
+                ctlr.updateResourceStatus(TransportServer, virtual, "", StatusError, err)
+                return nil
             }
         }
     }
@@ -2915,7 +3001,7 @@ func (ctlr *Controller) processTransportServers(
     rsMap := ctlr.resources.getPartitionResourceMap(partition)
     rsMap[rsName] = rsCfg
-
+    ctlr.updateResourceStatus(TransportServer, virtual, ip, "", nil)
     if len(rsCfg.MetaData.hosts) > 0 {
         ctlr.ProcessAssociatedExternalDNS(rsCfg.MetaData.hosts)
     }
@@ -3003,6 +3089,13 @@ func (ctlr *Controller) processLBServices(
     isSVCDeleted bool,
 ) error {
+    // Read the partition annotation if provided
+    var partition string
+    if partitionValue, partitionSpecified := svc.Annotations[LBServicePartitionAnnotation]; partitionSpecified {
+        partition = partitionValue
+    } else {
+        partition = ctlr.Partition
+    }
     ip, ok1 := svc.Annotations[LBServiceIPAnnotation]
     ipamLabel, ok2 := svc.Annotations[LBServiceIPAMLabelAnnotation]
     if !ok1 && !ok2 {
@@ -3057,12 +3150,12 @@ func (ctlr *Controller) processLBServices(
         rsName := AS3NameFormatter(fmt.Sprintf("vs_lb_svc_%s_%s_%s_%v", svc.Namespace, svc.Name, ip, portSpec.Port))
         if isSVCDeleted {
-            rsMap := ctlr.resources.getPartitionResourceMap(ctlr.Partition)
+            rsMap := ctlr.resources.getPartitionResourceMap(partition)
             var hostnames []string
             if _, ok := rsMap[rsName]; ok {
                 hostnames = rsMap[rsName].MetaData.hosts
             }
-            ctlr.deleteVirtualServer(ctlr.Partition, rsName)
+            ctlr.deleteVirtualServer(partition, rsName)
             if len(hostnames) > 0 {
                 ctlr.ProcessAssociatedExternalDNS(hostnames)
             }
@@ -3070,7 +3163,7 @@ func (ctlr *Controller) processLBServices(
         }
         rsCfg := &ResourceConfig{}
-        rsCfg.Virtual.Partition = ctlr.Partition
+        rsCfg.Virtual.Partition = partition
         rsCfg.Virtual.IpProtocol = strings.ToLower(string(portSpec.Protocol))
         rsCfg.MetaData.ResourceType = TransportServer
         rsCfg.MetaData.namespace = svc.ObjectMeta.Namespace
@@ -3107,7 +3200,18 @@ func (ctlr *Controller) processLBServices(
         _ = ctlr.prepareRSConfigFromLBService(rsCfg, svc, portSpec)
-        rsMap := ctlr.resources.getPartitionResourceMap(ctlr.Partition)
+        // handle pool settings from policy cr
+        if plc != nil {
+            if plc.Spec.PoolSettings != (cisapiv1.PoolSettingsSpec{}) {
+                err := ctlr.handlePoolResourceConfigForPolicy(rsCfg, plc)
+                if err != nil {
+                    log.Errorf("%v", err)
+                    return nil
+                }
+            }
+        }
+
+        rsMap := ctlr.resources.getPartitionResourceMap(partition)
         rsMap[rsName] = rsCfg
         if len(rsCfg.MetaData.hosts) > 0 {
@@ -3774,6 +3878,7 @@ func (ctlr *Controller) processIngressLink(
     var ip string
     var key string
     var status int
+    var altErr string
     partition := ctlr.getCRPartition(ingLink.Spec.Partition)
     key = ctlr.ipamClusterLabel + ingLink.ObjectMeta.Namespace + "/" + ingLink.ObjectMeta.Name + "_il"
     if ctlr.ipamCli != nil {
@@ -3786,29 +3891,42 @@ func (ctlr *Controller) processIngressLink(
         switch status {
         case NotEnabled:
-            log.Debug("[IPAM] IPAM Custom Resource Not Available")
+            altErr = "[IPAM] IPAM Custom Resource Not Available"
+            log.Error(altErr)
+            ctlr.updateResourceStatus(IngressLink, ingLink, "", StatusError, errors.New(altErr))
             return nil
         case InvalidInput:
-            log.Debugf("[IPAM] IPAM Invalid IPAM Label: %v for IngressLink: %s/%s",
+            altErr = fmt.Sprintf("[IPAM] IPAM Invalid IPAM Label: %v for IngressLink: %s/%s",
                 ingLink.Spec.IPAMLabel, ingLink.Namespace, ingLink.Name)
+            log.Error(altErr)
+            ctlr.updateResourceStatus(IngressLink, ingLink, "", StatusError, errors.New(altErr))
             return nil
        case NotRequested:
-            return fmt.Errorf("[IPAM] unable to make IPAM Request, will be re-requested soon")
+            altErr = "[IPAM] unable to make IPAM Request, will be re-requested soon"
+            log.Error(altErr)
+            ctlr.updateResourceStatus(IngressLink, ingLink, "", StatusError, errors.New(altErr))
+            return fmt.Errorf("%s", altErr)
        case Requested:
-            log.Debugf("[IPAM] IP address requested for IngressLink: %s/%s", ingLink.Namespace, ingLink.Name)
+            altErr = fmt.Sprintf("[IPAM] IP address requested for IngressLink: %s/%s", ingLink.Namespace, ingLink.Name)
+            log.Error(altErr)
+            ctlr.updateResourceStatus(IngressLink, ingLink, "", StatusError, errors.New(altErr))
             return nil
        }
        log.Debugf("[IPAM] requested IP for ingLink %v is: %v", ingLink.ObjectMeta.Name, ip)
        if ip == "" {
-            log.Debugf("[IPAM] requested IP for ingLink %v is empty.", ingLink.ObjectMeta.Name)
+            altErr = fmt.Sprintf("[IPAM] requested IP for ingLink %v is empty.", ingLink.ObjectMeta.Name)
+            log.Error(altErr)
+            ctlr.updateResourceStatus(IngressLink, ingLink, "", StatusError, errors.New(altErr))
             return nil
        }
-        ctlr.updateIngressLinkStatus(ingLink, ip)
+        // ctlr.updateIngressLinkStatus(ingLink, ip)
        svc, err := ctlr.getKICServiceOfIngressLink(ingLink)
        if err != nil {
+            ctlr.updateResourceStatus(IngressLink, ingLink, "", StatusError, err)
            return err
        }
        if svc == nil {
+            ctlr.updateResourceStatus(IngressLink, ingLink, "", StatusError, errors.New("ingress service not found"))
            return nil
        }
        if _, ok := ctlr.shouldProcessServiceTypeLB(svc); ok {
@@ -3817,7 +3935,10 @@ func (ctlr *Controller) processIngressLink(
        }
    } else {
        if ingLink.Spec.VirtualServerAddress == "" {
-            return fmt.Errorf("No VirtualServer address in ingLink or IPAM found.")
+            altErr = "no VirtualServer address in ingLink or IPAM found"
+            log.Error(altErr)
+            ctlr.updateResourceStatus(IngressLink, ingLink, "", StatusError, errors.New(altErr))
+            return fmt.Errorf("%s", altErr)
        }
        ip = ingLink.Spec.VirtualServerAddress
    }
@@ -3856,10 +3977,12 @@ func (ctlr *Controller) processIngressLink(
     ctlr.TeemData.Unlock()
     svc, err := ctlr.getKICServiceOfIngressLink(ingLink)
     if err != nil {
+        ctlr.updateResourceStatus(IngressLink, ingLink, "", StatusError, err)
         return err
     }
     if svc == nil {
+        ctlr.updateResourceStatus(IngressLink, ingLink, "", StatusError, errors.New("ingress service not found"))
         return nil
     }
     targetPort := nginxMonitorPort
@@ -3961,7 +4084,6 @@ func (ctlr *Controller) processIngressLink(
             ctlr.ProcessAssociatedExternalDNS(hostnames)
         }
     }
-
     return nil
 }
@@ -4217,11 +4339,11 @@ func getNodeport(svc *v1.Service, servicePort int32) int32 {
 // Update virtual server status with virtual server address
 func (ctlr *Controller) updateVirtualServerStatus(vs *cisapiv1.VirtualServer, ip string, statusOk string) {
     // Set the vs status to include the virtual IP address
-    vsStatus := cisapiv1.VirtualServerStatus{VSAddress: ip, StatusOk: statusOk}
+    vsStatus := cisapiv1.VirtualServerStatus{VSAddress: ip, Status: statusOk}
     log.Debugf("Updating VirtualServer Status with %v for resource name:%v , namespace: %v", vsStatus, vs.Name, vs.Namespace)
     vs.Status = vsStatus
     vs.Status.VSAddress = ip
-    vs.Status.StatusOk = statusOk
+    vs.Status.Status = statusOk
     _, updateErr := ctlr.kubeCRClient.CisV1().VirtualServers(vs.ObjectMeta.Namespace).UpdateStatus(context.TODO(), vs, metav1.UpdateOptions{})
     if nil != updateErr {
         log.Debugf("Error while updating virtual server status:%v", updateErr)
@@ -4232,11 +4354,11 @@ func (ctlr *Controller) updateVirtualServerStatus(vs *cisapiv1.VirtualServer, ip
 // Update Transport server status with virtual server address
 func (ctlr *Controller) updateTransportServerStatus(ts *cisapiv1.TransportServer, ip string, statusOk string) {
     // Set the vs status to include the virtual IP address
-    tsStatus := cisapiv1.TransportServerStatus{VSAddress: ip, StatusOk: statusOk}
+    tsStatus := cisapiv1.TransportServerStatus{VSAddress: ip, Status: statusOk}
     log.Debugf("Updating VirtualServer Status with %v for resource name:%v , namespace: %v", tsStatus, ts.Name, ts.Namespace)
     ts.Status = tsStatus
     ts.Status.VSAddress = ip
-    ts.Status.StatusOk = statusOk
+    ts.Status.Status = statusOk
     _, updateErr := ctlr.kubeCRClient.CisV1().TransportServers(ts.ObjectMeta.Namespace).UpdateStatus(context.TODO(), ts, metav1.UpdateOptions{})
     if nil != updateErr {
         log.Debugf("Error while updating Transport server status:%v", updateErr)
@@ -4926,3 +5048,121 @@ func (ctlr *Controller) isAddingPoolRestricted(cluster string) bool {
     }
     return false
 }
+
+func (ctlr *Controller) updateResourceStatus(rscType string, obj interface{}, ip string, status string, err error) {
+    unmonitoredOptions := metav1.ListOptions{
+        LabelSelector: strings.ReplaceAll(ctlr.customResourceSelector.String(), " in ", " notin "),
+    }
+    switch rscType {
+    case VirtualServer:
+        vs := obj.(*cisapiv1.VirtualServer)
+        vsStatus := cisapiv1.VirtualServerStatus{
+            Status:      status,
+            LastUpdated: metav1.Now(),
+        }
+        if err != nil {
+            vsStatus.Error = err.Error()
+        } else if ip != "" {
+            vsStatus.VSAddress = ip
+        } else {
+            vsStatus.Error = fmt.Sprintf("Missing label f5cr on VS %v/%v", vs.Namespace, vs.Name)
+        }
+        vs.Status = vsStatus
+        _, updateErr := ctlr.kubeCRClient.CisV1().VirtualServers(vs.ObjectMeta.Namespace).UpdateStatus(context.TODO(), vs, metav1.UpdateOptions{})
+        if nil != updateErr {
+            log.Errorf("Error while updating VS status:%v", updateErr)
+        }
+        unmonitoredVS, err := ctlr.kubeCRClient.CisV1().VirtualServers("").List(context.TODO(), unmonitoredOptions)
+        if err != nil {
+            log.Errorf("Error while fetching unmonitored virtual servers: %v %v", err, unmonitoredVS)
+        }
+
+        for _, virtualServer := range unmonitoredVS.Items {
+            erased := false
+            for retryCount := 0; !erased && retryCount < 3; retryCount++ {
+                virtual, getErr := ctlr.kubeCRClient.CisV1().VirtualServers(virtualServer.ObjectMeta.Namespace).Get(context.TODO(), virtualServer.ObjectMeta.Name, metav1.GetOptions{})
+                if getErr != nil {
+                    log.Errorf("Error while fetching virtual server %v/%v: %v", virtualServer.ObjectMeta.Namespace, virtualServer.ObjectMeta.Name, getErr)
+                }
+                if virtual == nil {
+                    break
+                }
+                virtual.Status = cisapiv1.VirtualServerStatus{
+                    Error: fmt.Sprintf("Missing label f5cr on VS %v/%v", virtual.Namespace, virtual.Name),
+                }
+                _, err := ctlr.kubeCRClient.CisV1().VirtualServers(virtualServer.ObjectMeta.Namespace).UpdateStatus(context.TODO(), virtual, metav1.UpdateOptions{})
+                if err != nil {
+                    log.Errorf("Error while Erasing Virtual Server Status: %v\n", err)
+                } else {
+                    erased = true
+                    log.Debugf("Status Erased for Virtual Server - %v\n", virtual.ObjectMeta.Name)
+                }
+            }
+        }
+
+    case TransportServer:
+        ts := obj.(*cisapiv1.TransportServer)
+        tsStatus := cisapiv1.TransportServerStatus{
+            Status:      status,
+            LastUpdated: metav1.Now(),
+        }
+        if err != nil {
+            tsStatus.Error = err.Error()
+        } else if ip != "" {
+            tsStatus.VSAddress = ip
+        } else {
+            tsStatus.Error = fmt.Sprintf("Missing label f5cr on TS %v/%v", ts.Namespace, ts.Name)
+        }
+        ts.Status = tsStatus
+        _, updateErr := ctlr.kubeCRClient.CisV1().TransportServers(ts.ObjectMeta.Namespace).UpdateStatus(context.TODO(), ts, metav1.UpdateOptions{})
+        if nil != updateErr {
+            log.Errorf("Error while updating TS status:%v", updateErr)
+        }
+
+        unmonitoredTS, err := ctlr.kubeCRClient.CisV1().TransportServers("").List(context.TODO(), unmonitoredOptions)
+        if err != nil {
+            log.Errorf("Error while fetching unmonitored transport servers: %v %v", err, unmonitoredTS)
+        }
+
+        for _, transportServer := range unmonitoredTS.Items {
+            erased := false
+            for retryCount := 0; !erased && retryCount < 3; retryCount++ {
+                virtual, getErr := ctlr.kubeCRClient.CisV1().TransportServers(transportServer.ObjectMeta.Namespace).Get(context.TODO(), transportServer.ObjectMeta.Name, metav1.GetOptions{})
+                if getErr != nil {
+                    log.Errorf("Error while fetching transport server %v/%v: %v", transportServer.ObjectMeta.Namespace, transportServer.ObjectMeta.Name, getErr)
+                }
+                if virtual == nil {
+                    break
+                }
+                virtual.Status = cisapiv1.TransportServerStatus{
+                    Error: fmt.Sprintf("Missing label f5cr on TS %v/%v", virtual.Namespace, virtual.Name),
+                }
+                _, err := ctlr.kubeCRClient.CisV1().TransportServers(transportServer.ObjectMeta.Namespace).UpdateStatus(context.TODO(), virtual, metav1.UpdateOptions{})
+                if err != nil {
+                    log.Errorf("Error while Erasing Transport Server Status: %v\n", err)
+                } else {
+                    erased = true
+                    log.Debugf("Status Erased for Transport Server - %v\n", virtual.ObjectMeta.Name)
+                }
+            }
+        }
+    case IngressLink:
+        il := obj.(*cisapiv1.IngressLink)
+        ilStatus := cisapiv1.IngressLinkStatus{
+            Status:      status,
+            LastUpdated: metav1.Now(),
+        }
+        if err != nil {
+            ilStatus.Error = err.Error()
+        } else if ip != "" {
+            ilStatus.VSAddress = ip
+        } else {
+            ilStatus.Error = fmt.Sprintf("Missing label f5cr on il %v/%v", il.Namespace, il.Name)
+        }
+        il.Status = ilStatus
+        _, updateErr := ctlr.kubeCRClient.CisV1().IngressLinks(il.ObjectMeta.Namespace).UpdateStatus(context.TODO(), il, metav1.UpdateOptions{})
+        if nil != updateErr {
+            log.Errorf("Error while updating il status:%v", updateErr)
+        }
+    }
+}
diff --git a/pkg/controller/worker_test.go b/pkg/controller/worker_test.go
index ac070960c..3ce400369 100644
--- a/pkg/controller/worker_test.go
+++ b/pkg/controller/worker_test.go
@@ -102,6 +102,7 @@ var _ = Describe("Worker Tests", func() {
         mockCtlr.crInformers = make(map[string]*CRInformer)
         mockCtlr.comInformers = make(map[string]*CommonInformer)
         mockCtlr.nativeResourceSelector, _ = createLabelSelector(DefaultCustomResourceLabel)
+        mockCtlr.customResourceSelector, _ = createLabelSelector(DefaultCustomResourceLabel)
         _ = mockCtlr.addNamespacedInformers("default", false)
         mockCtlr.resourceQueue = workqueue.NewNamedRateLimitingQueue(
             workqueue.DefaultControllerRateLimiter(), "custom-resource-controller")
@@ -1032,6 +1033,31 @@ var _ = Describe("Worker Tests", func() {
             Expect(mockCtlr.resources.ltmConfig["default"].ResourceMap["vs_lb_svc_default_svc1_10_10_10_1_80"]).NotTo(BeNil(), "Invalid Resource Configs")
         })
+        It("Processing ServiceTypeLoadBalancer with partition annotation", func() {
+            // initialise mockCtlr
+            mockCtlr.Partition = "test"
+            mockCtlr.eventNotifier = apm.NewEventNotifier(nil)
+            mockCtlr.resources.Init()
+
+            // Create the serviceTypeLB resource
+            svc1.Spec.Type = v1.ServiceTypeLoadBalancer
+            svc1.Annotations = make(map[string]string)
+            svc1.Annotations[LBServiceIPAnnotation] = "10.10.10.2"
+            partition := "partition1"
+            svc1.Annotations[LBServicePartitionAnnotation] = partition
+            // Process the serviceTypeLB
+            _ = mockCtlr.processLBServices(svc1, false)
+            Expect(len(mockCtlr.resources.ltmConfig)).To(Equal(1), "Invalid Resource Configs")
+            Expect(mockCtlr.resources.ltmConfig[partition].ResourceMap["vs_lb_svc_default_svc1_10_10_10_2_80"]).NotTo(BeNil(), "Invalid Resource Configs")
+            // Delete the serviceTypeLB
+            _ = mockCtlr.processLBServices(svc1, true)
+            // Update new partition annotation in the serviceTypeLB
+            newPartition := "partition2"
+            svc1.Annotations[LBServicePartitionAnnotation] = newPartition
+            _ = mockCtlr.processLBServices(svc1, false)
+            Expect(len(mockCtlr.resources.ltmConfig[partition].ResourceMap)).To(Equal(0), "Invalid Resource Configs")
+            Expect(mockCtlr.resources.ltmConfig[newPartition].ResourceMap["vs_lb_svc_default_svc1_10_10_10_2_80"]).NotTo(BeNil(), "Invalid Resource Configs")
+        })
         It("Processing External DNS", func() {
             mockCtlr.resources.Init()
@@ -1573,7 +1599,6 @@ var _ = Describe("Worker Tests", func() {
         mockCtlr.crInformers = make(map[string]*CRInformer)
         mockCtlr.nsInformers = make(map[string]*NSInformer)
         mockCtlr.comInformers = make(map[string]*CommonInformer)
-        mockCtlr.customResourceSelector, _ = createLabelSelector(DefaultCustomResourceLabel)
         mockCtlr.resourceQueue = workqueue.NewNamedRateLimitingQueue(
             workqueue.DefaultControllerRateLimiter(), "custom-resource-controller")
         mockCtlr.resources = NewResourceStore()
@@ -2132,7 +2157,7 @@ var _ = Describe("Worker Tests", func() {
             rscUpdateMeta := resourceStatusMeta{
                 0,
-                make(map[string]struct{}),
+                make(map[string]tenantResponse),
             }
             time.Sleep(10 * time.Millisecond)
@@ -2147,7 +2172,7 @@ var _ = Describe("Worker Tests", func() {
             config.reqId = mockCtlr.Controller.enqueueReq(config)
             mockCtlr.Agent.respChan <- rscUpdateMeta
-            rscUpdateMeta.failedTenants["test"] = struct{}{}
+            rscUpdateMeta.failedTenants["test"] = tenantResponse{}
             mockCtlr.Agent.respChan <- rscUpdateMeta
             time.Sleep(10 * time.Millisecond)
@@ -2387,7 +2412,7 @@ var _ = Describe("Worker Tests", func() {
             rscUpdateMeta := resourceStatusMeta{
                 0,
-                make(map[string]struct{}),
+                make(map[string]tenantResponse),
             }
             mockCtlr.Agent.respChan <- rscUpdateMeta
@@ -2402,7 +2427,7 @@ var _ = Describe("Worker Tests", func() {
             rscUpdateMeta.id = 3
             mockCtlr.Agent.respChan <- rscUpdateMeta
-            rscUpdateMeta.failedTenants["test"] = struct{}{}
+            rscUpdateMeta.failedTenants["test"] = tenantResponse{}
             config.reqId = mockCtlr.Controller.enqueueReq(config)
             config.reqId = mockCtlr.Controller.enqueueReq(config)
             rscUpdateMeta.id = 3
@@ -3886,7 +3911,7 @@ extendedRouteSpec:
             //Expect(len(mockCtlr.getOrderedRoutes(""))).To(Equal(1), "Invalid no of Routes")
             rscUpdateMeta := resourceStatusMeta{
                 0,
-                make(map[string]struct{}),
+                make(map[string]tenantResponse),
             }
             mockCtlr.routeClientV1.Routes("default").Create(context.TODO(), route1, metav1.CreateOptions{})
@@ -3932,6 +3957,7 @@ extendedRouteSpec:
         mockCtlr.crInformers = make(map[string]*CRInformer)
         mockCtlr.comInformers = make(map[string]*CommonInformer)
         mockCtlr.nativeResourceSelector, _ = createLabelSelector(DefaultCustomResourceLabel)
+        mockCtlr.customResourceSelector, _ = createLabelSelector(DefaultCustomResourceLabel)
         _ = mockCtlr.addNamespacedInformers("default", false)
         mockCtlr.resourceQueue = workqueue.NewNamedRateLimitingQueue(
             workqueue.DefaultControllerRateLimiter(), "custom-resource-controller")