Skip to content

Commit

Permalink
Support for traffic distribution with weights in multiClusterServices…
Browse files Browse the repository at this point in the history
… in default mode for TS
  • Loading branch information
charanm08 committed Oct 3, 2024
1 parent 1243808 commit 041cce2
Show file tree
Hide file tree
Showing 7 changed files with 97 additions and 77 deletions.
14 changes: 7 additions & 7 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ export BUILD_INFO := $(shell ./build-tools/version-tool build-info)
GO_BUILD_FLAGS=-v -ldflags "-extldflags \"-static\" -X main.version=$(BUILD_VERSION) -X main.buildInfo=$(BUILD_INFO)"

# Allow users to pass in BASE_OS build options (debian or rhel7)
BASE_OS ?= debian
BASE_OS ?= ubi

# This is for builds not triggered through Travis CI
LICENSE_STRICT ?= false
Expand Down Expand Up @@ -75,13 +75,13 @@ pre-build:
prod-build: pre-build
@echo "Building with running tests..."

docker build --build-arg RUN_TESTS=1 --build-arg BUILD_VERSION=$(BUILD_VERSION) --build-arg BUILD_INFO=$(BUILD_INFO) -t k8s-bigip-ctlr:latest -f build-tools/Dockerfile.$(BASE_OS) .
docker build --platform linux/amd64 --build-arg RUN_TESTS=1 --build-arg BUILD_VERSION=$(BUILD_VERSION) --build-arg BUILD_INFO=$(BUILD_INFO) -t k8s-bigip-ctlr:latest -f build-tools/Dockerfile.$(BASE_OS) .

prod-quick: prod-build-quick

prod-build-quick: pre-build
@echo "Quick build without running tests..."
docker build --build-arg RUN_TESTS=0 --build-arg BUILD_VERSION=$(BUILD_VERSION) --build-arg BUILD_INFO=$(BUILD_INFO) -t k8s-bigip-ctlr:latest -f build-tools/Dockerfile.$(BASE_OS) .
docker build --platform linux/amd64 --build-arg RUN_TESTS=0 --build-arg BUILD_VERSION=$(BUILD_VERSION) --build-arg BUILD_INFO=$(BUILD_INFO) -t k8s-bigip-ctlr:latest -f build-tools/Dockerfile.$(BASE_OS) .

dev-license: pre-build
@echo "Running with tests and licenses generated will be in all_attributions.txt..."
Expand All @@ -93,7 +93,7 @@ dev-license: pre-build

debug: pre-build
@echo "Building with debug support..."
docker build --build-arg RUN_TESTS=0 --build-arg BUILD_VERSION=$(BUILD_VERSION) --build-arg BUILD_INFO=$(BUILD_INFO) --platform linux/amd64 -t k8s-bigip-ctlr-dbg:latest -f build-tools/Dockerfile.debug .
docker build --platform linux/amd64 --build-arg RUN_TESTS=0 --build-arg BUILD_VERSION=$(BUILD_VERSION) --build-arg BUILD_INFO=$(BUILD_INFO) -t k8s-bigip-ctlr-dbg:latest -f build-tools/Dockerfile.debug .


fmt:
Expand All @@ -105,7 +105,7 @@ vet:
$(CURDIR)/build-tools/vet.sh

devel-image:
docker build --build-arg RUN_TESTS=0 --build-arg BUILD_VERSION=$(BUILD_VERSION) --build-arg BUILD_INFO=$(BUILD_INFO) -t k8s-bigip-ctlr-devel:latest -f build-tools/Dockerfile.$(BASE_OS) .
docker build --platform linux/amd64 --build-arg RUN_TESTS=0 --build-arg BUILD_VERSION=$(BUILD_VERSION) --build-arg BUILD_INFO=$(BUILD_INFO) -t k8s-bigip-ctlr-devel:latest -f build-tools/Dockerfile.$(BASE_OS) .

# Enable certain functionalities only on a developer build
dev-patch:
Expand Down Expand Up @@ -151,5 +151,5 @@ else
endif

crd-code-gen:
docker run --name crdcodegen -v $(PWD):/go/src/github.com/F5Networks/k8s-bigip-ctlr/v2 quay.io/f5networks/ciscrdcodegen:latest
docker rm crdcodegen
docker run --name crdcodegen -v $(PWD):/go/src/github.com/F5Networks/k8s-bigip-ctlr/v3 quay.io/f5networks/ciscrdcodegen:v1
docker rm crdcodegen
6 changes: 4 additions & 2 deletions build-tools/Dockerfile.debug
Original file line number Diff line number Diff line change
Expand Up @@ -7,13 +7,14 @@ ARG BUILD_VERSION
ARG BUILD_INFO

WORKDIR $REPOPATH
ENV CGO_ENABLED=0
ENV GO111MODULE on
COPY . .

RUN $REPOPATH/build-tools/rel-build.sh && \
go install github.com/go-delve/delve/cmd/dlv@latest
go install github.com/go-delve/delve/cmd/dlv@v1.21.2

FROM python:3.10.14-slim
FROM python:3.10-slim-buster

ENV APPPATH /app

Expand All @@ -35,6 +36,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
&& pip install --no-cache-dir --upgrade pip==20.0.2 \
&& pip install --no-cache-dir -r /tmp/requirements.txt \
&& apt-get remove -y git \
&& apt-get remove -y libidn11 \
&& echo "{\"version\": \"${BUILD_VERSION}\", \"build\": \"${BUILD_INFO}\"}" > $APPPATH/vendor/src/f5/VERSION_BUILD.json

COPY schemas/*.json $APPPATH/vendor/src/f5/schemas/
Expand Down
1 change: 1 addition & 0 deletions docs/RELEASE-NOTES.rst
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ Added Functionality
**What's new:**
* Multi Cluster
* Support to add HA services in multiClusterServices for Transport Servers.
* Support for traffic distribution with weights in multiClusterServices in default mode for TS.
* CRD
* `Issue 3536 <https://github.com/F5Networks/k8s-bigip-ctlr/issues/3536>`_: Support CRD status for VS, TS and IngressLink
* Support for custom partition and Pool settings for ServiceTypeLB service. `Examples <https://raw.githubusercontent.com/F5Networks/k8s-bigip-ctlr/2.x-master/docs/config_examples/customResource/serviceTypeLB/>`_
Expand Down
2 changes: 1 addition & 1 deletion pkg/controller/multiClusterInformers.go
Original file line number Diff line number Diff line change
Expand Up @@ -339,7 +339,7 @@ func (ctlr *Controller) getNamespaceMultiClusterPoolInformer(
namespace string, clusterName string,
) (*MultiClusterPoolInformer, bool) {
// CIS may be watching all namespaces in case of HA clusters only
if clusterName == ctlr.multiClusterConfigs.HAPairClusterName && ctlr.watchingAllNamespaces() {
if clusterName == ctlr.multiClusterConfigs.HAPairClusterName && ctlr.watchingAllNamespaces() && ctlr.discoveryMode != DefaultMode {
namespace = ""
}
nsPoolInf, ok := ctlr.multiClusterPoolInformers[clusterName]
Expand Down
15 changes: 9 additions & 6 deletions pkg/controller/resourceConfig.go
Original file line number Diff line number Diff line change
Expand Up @@ -2303,12 +2303,15 @@ func (ctlr *Controller) prepareRSConfigFromTransportServer(
}
pool.MultiClusterServices = multiClusterServices
}
// update the multicluster resource serviceMap with local cluster services
ctlr.updateMultiClusterResourceServiceMap(rsCfg, rsRef, vs.Spec.Pool.Service, vs.Spec.Pool.Path, pool, vs.Spec.Pool.ServicePort, "")
// update the multicluster resource serviceMap with HA pair cluster services
if ctlr.discoveryMode == Active && ctlr.multiClusterConfigs.HAPairClusterName != "" {
ctlr.updateMultiClusterResourceServiceMap(rsCfg, rsRef, vs.Spec.Pool.Service, "", pool, vs.Spec.Pool.ServicePort,
ctlr.multiClusterConfigs.HAPairClusterName)

if ctlr.discoveryMode != DefaultMode {
// update the multicluster resource serviceMap with local cluster services
ctlr.updateMultiClusterResourceServiceMap(rsCfg, rsRef, vs.Spec.Pool.Service, vs.Spec.Pool.Path, pool, vs.Spec.Pool.ServicePort, "")
// update the multicluster resource serviceMap with HA pair cluster services
if ctlr.discoveryMode == Active && ctlr.multiClusterConfigs.HAPairClusterName != "" {
ctlr.updateMultiClusterResourceServiceMap(rsCfg, rsRef, vs.Spec.Pool.Service, "", pool, vs.Spec.Pool.ServicePort,
ctlr.multiClusterConfigs.HAPairClusterName)
}
}
} else {
ctlr.updateMultiClusterResourceServiceMap(rsCfg, rsRef, vs.Spec.Pool.Service, vs.Spec.Pool.Path, pool, vs.Spec.Pool.ServicePort, "")
Expand Down
131 changes: 72 additions & 59 deletions pkg/controller/worker.go
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,10 @@ func (ctlr *Controller) processResources() bool {
if ctlr.mode != OpenShiftMode {
break
}
if ctlr.discoveryMode == DefaultMode {
log.Errorf("Routes are not supported with multiCluster HA mode: %v", ctlr.discoveryMode)
break
}
route := rKey.rsc.(*routeapi.Route)
// processRoutes knows when to delete a VS (in the event of global config update and route delete)
// so should not trigger delete from here
Expand Down Expand Up @@ -303,6 +307,10 @@ func (ctlr *Controller) processResources() bool {
if ctlr.mode == OpenShiftMode || ctlr.mode == KubernetesMode {
break
}
if ctlr.discoveryMode == DefaultMode {
log.Errorf("Virtual servers are not supported with multiCluster HA mode: %v", ctlr.discoveryMode)
break
}
virtual := rKey.rsc.(*cisapiv1.VirtualServer)
rscRefKey := resourceRef{
kind: VirtualServer,
Expand Down Expand Up @@ -2247,46 +2255,49 @@ func (ctlr *Controller) fetchService(svcKey MultiClusterServiceKey) (error, *v1.
func (ctlr *Controller) updatePoolMembersForResources(pool *Pool) {
var poolMembers []PoolMember
var clsSvcPoolMemMap = make(map[MultiClusterServiceKey][]PoolMember)
// for local cluster
// Skip adding the pool members if adding pool member is restricted for local cluster in multi cluster mode
if pool.Cluster == "" && !ctlr.isAddingPoolRestricted(pool.Cluster) {
pms := ctlr.fetchPoolMembersForService(pool.ServiceName, pool.ServiceNamespace, pool.ServicePort,
pool.NodeMemberLabel, "", pool.ConnectionLimit, pool.BigIPRouteDomain)
poolMembers = append(poolMembers, pms...)
if len(ctlr.clusterRatio) > 0 && !pool.SinglePoolRatioEnabled {
pool.Members = pms
return
}

if pool.SinglePoolRatioEnabled {
clsSvcPoolMemMap[MultiClusterServiceKey{serviceName: pool.ServiceName, namespace: pool.ServiceNamespace,
clusterName: ""}] = pms
if ctlr.discoveryMode != DefaultMode {
// for local cluster
// Skip adding the pool members if adding pool member is restricted for local cluster in multi cluster mode
if pool.Cluster == "" && !ctlr.isAddingPoolRestricted(pool.Cluster) {
pms := ctlr.fetchPoolMembersForService(pool.ServiceName, pool.ServiceNamespace, pool.ServicePort,
pool.NodeMemberLabel, "", pool.ConnectionLimit, pool.BigIPRouteDomain)
poolMembers = append(poolMembers, pms...)
if len(ctlr.clusterRatio) > 0 && !pool.SinglePoolRatioEnabled {
pool.Members = pms
return
}

if pool.SinglePoolRatioEnabled {
clsSvcPoolMemMap[MultiClusterServiceKey{serviceName: pool.ServiceName, namespace: pool.ServiceNamespace,
clusterName: ""}] = pms
}
}
}

// for HA cluster pair service
// Skip adding the pool members for the HA peer cluster if adding pool member is restricted for HA peer cluster in multi cluster mode
// Process HA cluster in active / ratio mode only with - SinglePoolRatioEnabled(ts)
if (ctlr.discoveryMode == Active || (len(ctlr.clusterRatio) > 0 && pool.SinglePoolRatioEnabled)) && ctlr.multiClusterConfigs.HAPairClusterName != "" &&
!ctlr.isAddingPoolRestricted(ctlr.multiClusterConfigs.HAPairClusterName) {
pms := ctlr.fetchPoolMembersForService(pool.ServiceName, pool.ServiceNamespace, pool.ServicePort,
pool.NodeMemberLabel, ctlr.multiClusterConfigs.HAPairClusterName, pool.ConnectionLimit, pool.BigIPRouteDomain)
poolMembers = append(poolMembers, pms...)
// for HA cluster pair service
// Skip adding the pool members for the HA peer cluster if adding pool member is restricted for HA peer cluster in multi cluster mode
// Process HA cluster in active / ratio mode only with - SinglePoolRatioEnabled(ts)
if (ctlr.discoveryMode == Active || (len(ctlr.clusterRatio) > 0 && pool.SinglePoolRatioEnabled)) && ctlr.multiClusterConfigs.HAPairClusterName != "" &&
!ctlr.isAddingPoolRestricted(ctlr.multiClusterConfigs.HAPairClusterName) {
pms := ctlr.fetchPoolMembersForService(pool.ServiceName, pool.ServiceNamespace, pool.ServicePort,
pool.NodeMemberLabel, ctlr.multiClusterConfigs.HAPairClusterName, pool.ConnectionLimit, pool.BigIPRouteDomain)
poolMembers = append(poolMembers, pms...)

if pool.SinglePoolRatioEnabled {
clsSvcPoolMemMap[MultiClusterServiceKey{serviceName: pool.ServiceName, namespace: pool.ServiceNamespace,
clusterName: ctlr.multiClusterConfigs.HAPairClusterName}] = pms
if pool.SinglePoolRatioEnabled {
clsSvcPoolMemMap[MultiClusterServiceKey{serviceName: pool.ServiceName, namespace: pool.ServiceNamespace,
clusterName: ctlr.multiClusterConfigs.HAPairClusterName}] = pms
}
}
}

// In case of ratio mode unique pools are created for each service so only update the pool members for this backend
// pool associated with the HA peer cluster or external cluster and return
if len(ctlr.clusterRatio) > 0 && !pool.SinglePoolRatioEnabled {
poolMembers = append(poolMembers,
ctlr.fetchPoolMembersForService(pool.ServiceName, pool.ServiceNamespace, pool.ServicePort,
pool.NodeMemberLabel, pool.Cluster, pool.ConnectionLimit, pool.BigIPRouteDomain)...)
pool.Members = poolMembers
return
// In case of ratio mode unique pools are created for each service so only update the pool members for this backend
// pool associated with the HA peer cluster or external cluster and return
if len(ctlr.clusterRatio) > 0 && !pool.SinglePoolRatioEnabled {
poolMembers = append(poolMembers,
ctlr.fetchPoolMembersForService(pool.ServiceName, pool.ServiceNamespace, pool.ServicePort,
pool.NodeMemberLabel, pool.Cluster, pool.ConnectionLimit, pool.BigIPRouteDomain)...)
pool.Members = poolMembers
return
}
}

// For multiCluster services
Expand Down Expand Up @@ -2318,7 +2329,7 @@ func (ctlr *Controller) updatePoolMembersForResources(pool *Pool) {
}
}

if !ctlr.isAddingPoolRestricted(pool.Cluster) {
if ctlr.discoveryMode != DefaultMode && !ctlr.isAddingPoolRestricted(pool.Cluster) {
for _, svc := range pool.AlternateBackends {
pms := ctlr.fetchPoolMembersForService(svc.Service, svc.ServiceNamespace, pool.ServicePort,
pool.NodeMemberLabel, pool.Cluster, pool.ConnectionLimit, pool.BigIPRouteDomain)
Expand Down Expand Up @@ -2359,41 +2370,43 @@ func (ctlr *Controller) updatePoolMemberWeights(svcMemMap map[MultiClusterServic

// in non ratio mode don't do any ratio calculation
// assign simple weights
if len(ctlr.clusterRatio) == 0 {
if len(ctlr.clusterRatio) == 0 || ctlr.discoveryMode == DefaultMode {
// for each service - pool members
for svcKey, plMem := range svcMemMap {
// for local or ha cluster check config
if (svcKey.clusterName == pool.Cluster || svcKey.clusterName == ctlr.multiClusterConfigs.HAPairClusterName) && svcKey.serviceName == pool.ServiceName &&
svcKey.namespace == pool.ServiceNamespace {
if pool.Weight > 0 {
ratio = int(float32(pool.Weight) / float32(len(plMem)))
}
for idx, _ := range plMem {
if pool.Weight == 0 {
plMem[idx].AdminState = "disable"
} else {
plMem[idx].Ratio = ratio
}
}
poolMem = append(poolMem, plMem...)
}

for _, svc := range pool.AlternateBackends {
if ctlr.discoveryMode != DefaultMode {
// for local or ha cluster check config
if (svcKey.clusterName == pool.Cluster || svcKey.clusterName == ctlr.multiClusterConfigs.HAPairClusterName) && svcKey.serviceName == svc.Service &&
svcKey.namespace == svc.ServiceNamespace {
if svc.Weight > 0 {
ratio = int(float32(svc.Weight) / float32(len(plMem)))
if (svcKey.clusterName == pool.Cluster || svcKey.clusterName == ctlr.multiClusterConfigs.HAPairClusterName) && svcKey.serviceName == pool.ServiceName &&
svcKey.namespace == pool.ServiceNamespace {
if pool.Weight > 0 {
ratio = int(float32(pool.Weight) / float32(len(plMem)))
}
for idx, _ := range plMem {
if svc.Weight == 0 {
if pool.Weight == 0 {
plMem[idx].AdminState = "disable"
} else {
plMem[idx].Ratio = ratio
}
}
poolMem = append(poolMem, plMem...)
break
}

for _, svc := range pool.AlternateBackends {
// for local or ha cluster check config
if (svcKey.clusterName == pool.Cluster || svcKey.clusterName == ctlr.multiClusterConfigs.HAPairClusterName) && svcKey.serviceName == svc.Service &&
svcKey.namespace == svc.ServiceNamespace {
if svc.Weight > 0 {
ratio = int(float32(svc.Weight) / float32(len(plMem)))
}
for idx, _ := range plMem {
if svc.Weight == 0 {
plMem[idx].AdminState = "disable"
} else {
plMem[idx].Ratio = ratio
}
}
poolMem = append(poolMem, plMem...)
break
}
}
}

Expand Down
5 changes: 3 additions & 2 deletions pkg/controller/worker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4067,8 +4067,9 @@ extendedRouteSpec:
VirtualServerAddress: "10.1.1.1",
HostGroup: "invalid",
Pool: cisapiv1.TSPool{
Name: "pool1",
Service: svc1,
Name: "pool1",
Service: svc1,
ServicePort: intstr.IntOrString{StrVal: "port-80"},
},
},
)
Expand Down

0 comments on commit 041cce2

Please sign in to comment.