diff --git a/api/v1beta1/awscluster_conversion.go b/api/v1beta1/awscluster_conversion.go index 37dafa53e7..547d439218 100644 --- a/api/v1beta1/awscluster_conversion.go +++ b/api/v1beta1/awscluster_conversion.go @@ -103,6 +103,7 @@ func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.NetworkSpec.VPC.EmptyRoutesDefaultVPCSecurityGroup = restored.Spec.NetworkSpec.VPC.EmptyRoutesDefaultVPCSecurityGroup dst.Spec.NetworkSpec.VPC.PrivateDNSHostnameTypeOnLaunch = restored.Spec.NetworkSpec.VPC.PrivateDNSHostnameTypeOnLaunch + dst.Spec.NetworkSpec.VPC.SecondaryCidrBlocks = restored.Spec.NetworkSpec.VPC.SecondaryCidrBlocks // Restore SubnetSpec.ResourceID field, if any. for _, subnet := range restored.Spec.NetworkSpec.Subnets { diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index eb1f4ad8de..07a332bccf 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -2292,6 +2292,7 @@ func Convert_v1beta1_VPCSpec_To_v1beta2_VPCSpec(in *VPCSpec, out *v1beta2.VPCSpe func autoConvert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(in *v1beta2.VPCSpec, out *VPCSpec, s conversion.Scope) error { out.ID = in.ID out.CidrBlock = in.CidrBlock + // WARNING: in.SecondaryCidrBlocks requires manual conversion: does not exist in peer-type // WARNING: in.IPAMPool requires manual conversion: does not exist in peer-type if in.IPv6 != nil { in, out := &in.IPv6, &out.IPv6 diff --git a/api/v1beta2/awscluster_webhook.go b/api/v1beta2/awscluster_webhook.go index 26f7be1711..9c9509100b 100644 --- a/api/v1beta2/awscluster_webhook.go +++ b/api/v1beta2/awscluster_webhook.go @@ -264,6 +264,14 @@ func (r *AWSCluster) validateNetwork() field.ErrorList { } } + secondaryCidrBlocks := r.Spec.NetworkSpec.VPC.SecondaryCidrBlocks + secondaryCidrBlocksField := field.NewPath("spec", "network", "vpc", "secondaryCidrBlocks") + for _, cidrBlock := range secondaryCidrBlocks { + if r.Spec.NetworkSpec.VPC.CidrBlock != "" && r.Spec.NetworkSpec.VPC.CidrBlock == cidrBlock.IPv4CidrBlock { + allErrs = append(allErrs, field.Invalid(secondaryCidrBlocksField, secondaryCidrBlocks, fmt.Sprintf("AWSCluster.spec.network.vpc.secondaryCidrBlocks must not contain the primary AWSCluster.spec.network.vpc.cidrBlock %v", r.Spec.NetworkSpec.VPC.CidrBlock))) + } + } + return allErrs } diff --git a/api/v1beta2/network_types.go b/api/v1beta2/network_types.go index ea5d627a61..b5efe45bf2 100644 --- a/api/v1beta2/network_types.go +++ b/api/v1beta2/network_types.go @@ -298,6 +298,13 @@ type IPAMPool struct { NetmaskLength int64 `json:"netmaskLength,omitempty"` } +// VpcCidrBlock defines the CIDR block and settings to associate with the managed VPC. Currently, only IPv4 is supported. +type VpcCidrBlock struct { + // IPv4CidrBlock is the IPv4 CIDR block to associate with the managed VPC. + // +kubebuilder:validation:MinLength=1 + IPv4CidrBlock string `json:"ipv4CidrBlock"` +} + // VPCSpec configures an AWS VPC. type VPCSpec struct { // ID is the vpc-id of the VPC this provider should use to create resources. @@ -308,6 +315,12 @@ type VPCSpec struct { // Mutually exclusive with IPAMPool. CidrBlock string `json:"cidrBlock,omitempty"` + // SecondaryCidrBlocks are additional CIDR blocks to be associated when the provider creates a managed VPC. + // Defaults to none. Mutually exclusive with IPAMPool. This makes sense to use if, for example, you want to use + // a separate IP range for pods (e.g. Cilium ENI mode). 
+ // +optional + SecondaryCidrBlocks []VpcCidrBlock `json:"secondaryCidrBlocks,omitempty"` + // IPAMPool defines the IPAMv4 pool to be used for VPC. // Mutually exclusive with CidrBlock. IPAMPool *IPAMPool `json:"ipamPool,omitempty"` @@ -510,6 +523,17 @@ func (s Subnets) FilterPrivate() (res Subnets) { return } +// FilterNonCni returns the subnets that are NOT intended for usage with the CNI pod network +// (i.e. do NOT have the `sigs.k8s.io/cluster-api-provider-aws/association=secondary` tag). +func (s Subnets) FilterNonCni() (res Subnets) { + for _, x := range s { + if x.Tags[NameAWSSubnetAssociation] != SecondarySubnetTagValue { + res = append(res, x) + } + } + return +} + // FilterPublic returns a slice containing all subnets marked as public. func (s Subnets) FilterPublic() (res Subnets) { for _, x := range s { diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index a41c5393f8..374a7da4db 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -1998,6 +1998,11 @@ func (in *TargetGroupSpec) DeepCopy() *TargetGroupSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VPCSpec) DeepCopyInto(out *VPCSpec) { *out = *in + if in.SecondaryCidrBlocks != nil { + in, out := &in.SecondaryCidrBlocks, &out.SecondaryCidrBlocks + *out = make([]VpcCidrBlock, len(*in)) + copy(*out, *in) + } if in.IPAMPool != nil { in, out := &in.IPAMPool, &out.IPAMPool *out = new(IPAMPool) @@ -2071,3 +2076,18 @@ func (in *Volume) DeepCopy() *Volume { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VpcCidrBlock) DeepCopyInto(out *VpcCidrBlock) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VpcCidrBlock. +func (in *VpcCidrBlock) DeepCopy() *VpcCidrBlock { + if in == nil { + return nil + } + out := new(VpcCidrBlock) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml index f2ed7ef9d8..67c83c388e 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml @@ -660,6 +660,26 @@ spec: - ip-name - resource-name type: string + secondaryCidrBlocks: + description: SecondaryCidrBlocks are additional CIDR blocks + to be associated when the provider creates a managed VPC. + Defaults to none. Mutually exclusive with IPAMPool. This + makes sense to use if, for example, you want to use a separate + IP range for pods (e.g. Cilium ENI mode). + items: + description: VpcCidrBlock defines the CIDR block and settings + to associate with the managed VPC. Currently, only IPv4 + is supported. + properties: + ipv4CidrBlock: + description: IPv4CidrBlock is the IPv4 CIDR block to + associate with the managed VPC. + minLength: 1 + type: string + required: + - ipv4CidrBlock + type: object + type: array tags: additionalProperties: type: string @@ -2512,6 +2532,26 @@ spec: - ip-name - resource-name type: string + secondaryCidrBlocks: + description: SecondaryCidrBlocks are additional CIDR blocks + to be associated when the provider creates a managed VPC. + Defaults to none. Mutually exclusive with IPAMPool. 
This + makes sense to use if, for example, you want to use a separate + IP range for pods (e.g. Cilium ENI mode). + items: + description: VpcCidrBlock defines the CIDR block and settings + to associate with the managed VPC. Currently, only IPv4 + is supported. + properties: + ipv4CidrBlock: + description: IPv4CidrBlock is the IPv4 CIDR block to + associate with the managed VPC. + minLength: 1 + type: string + required: + - ipv4CidrBlock + type: object + type: array tags: additionalProperties: type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml index 523153831e..4a3f04fccd 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml @@ -1494,6 +1494,26 @@ spec: - ip-name - resource-name type: string + secondaryCidrBlocks: + description: SecondaryCidrBlocks are additional CIDR blocks + to be associated when the provider creates a managed VPC. + Defaults to none. Mutually exclusive with IPAMPool. This + makes sense to use if, for example, you want to use a separate + IP range for pods (e.g. Cilium ENI mode). + items: + description: VpcCidrBlock defines the CIDR block and settings + to associate with the managed VPC. Currently, only IPv4 + is supported. + properties: + ipv4CidrBlock: + description: IPv4CidrBlock is the IPv4 CIDR block to + associate with the managed VPC. + minLength: 1 + type: string + required: + - ipv4CidrBlock + type: object + type: array tags: additionalProperties: type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml index e8ef04c449..b796db543d 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml @@ -1119,6 +1119,27 @@ spec: - ip-name - resource-name type: string + secondaryCidrBlocks: + description: SecondaryCidrBlocks are additional CIDR + blocks to be associated when the provider creates + a managed VPC. Defaults to none. Mutually exclusive + with IPAMPool. This makes sense to use if, for example, + you want to use a separate IP range for pods (e.g. + Cilium ENI mode). + items: + description: VpcCidrBlock defines the CIDR block + and settings to associate with the managed VPC. + Currently, only IPv4 is supported. + properties: + ipv4CidrBlock: + description: IPv4CidrBlock is the IPv4 CIDR + block to associate with the managed VPC. 
+ minLength: 1 + type: string + required: + - ipv4CidrBlock + type: object + type: array tags: additionalProperties: type: string diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go index 4b44508b65..c46a2ec4ad 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go @@ -164,7 +164,7 @@ func (r *AWSManagedControlPlane) ValidateUpdate(old runtime.Object) (admission.W if oldAWSManagedControlplane.Spec.NetworkSpec.VPC.IsIPv6Enabled() != r.Spec.NetworkSpec.VPC.IsIPv6Enabled() { allErrs = append(allErrs, - field.Invalid(field.NewPath("spec", "networkSpec", "vpc", "enableIPv6"), r.Spec.NetworkSpec.VPC.IsIPv6Enabled(), "changing IP family is not allowed after it has been set")) + field.Invalid(field.NewPath("spec", "network", "vpc", "enableIPv6"), r.Spec.NetworkSpec.VPC.IsIPv6Enabled(), "changing IP family is not allowed after it has been set")) } if len(allErrs) == 0 { @@ -406,23 +406,53 @@ func (r *AWSManagedControlPlane) validatePrivateDNSHostnameTypeOnLaunch() field. func (r *AWSManagedControlPlane) validateNetwork() field.ErrorList { var allErrs field.ErrorList + // If only `AWSManagedControlPlane.spec.secondaryCidrBlock` is set, no additional checks are done to remain + // backward-compatible. The `VPCSpec.SecondaryCidrBlocks` field was added later - if that list is not empty, we + // require `AWSManagedControlPlane.spec.secondaryCidrBlock` to be listed in there as well. This may allow merging + // the fields later on. + podSecondaryCidrBlock := r.Spec.SecondaryCidrBlock + secondaryCidrBlocks := r.Spec.NetworkSpec.VPC.SecondaryCidrBlocks + secondaryCidrBlocksField := field.NewPath("spec", "network", "vpc", "secondaryCidrBlocks") + if podSecondaryCidrBlock != nil && len(secondaryCidrBlocks) > 0 { + found := false + for _, cidrBlock := range secondaryCidrBlocks { + if cidrBlock.IPv4CidrBlock == *podSecondaryCidrBlock { + found = true + break + } + } + if !found { + allErrs = append(allErrs, field.Invalid(secondaryCidrBlocksField, secondaryCidrBlocks, fmt.Sprintf("AWSManagedControlPlane.spec.secondaryCidrBlock %v must be listed in AWSManagedControlPlane.spec.network.vpc.secondaryCidrBlocks (required if both fields are filled)", *podSecondaryCidrBlock))) + } + } + + if podSecondaryCidrBlock != nil && r.Spec.NetworkSpec.VPC.CidrBlock != "" && r.Spec.NetworkSpec.VPC.CidrBlock == *podSecondaryCidrBlock { + secondaryCidrBlockField := field.NewPath("spec", "vpc", "secondaryCidrBlock") + allErrs = append(allErrs, field.Invalid(secondaryCidrBlockField, secondaryCidrBlocks, fmt.Sprintf("AWSManagedControlPlane.spec.secondaryCidrBlock %v must not be equal to the primary AWSManagedControlPlane.spec.network.vpc.cidrBlock", *podSecondaryCidrBlock))) + } + for _, cidrBlock := range secondaryCidrBlocks { + if r.Spec.NetworkSpec.VPC.CidrBlock != "" && r.Spec.NetworkSpec.VPC.CidrBlock == cidrBlock.IPv4CidrBlock { + allErrs = append(allErrs, field.Invalid(secondaryCidrBlocksField, secondaryCidrBlocks, fmt.Sprintf("AWSManagedControlPlane.spec.network.vpc.secondaryCidrBlocks must not contain the primary AWSManagedControlPlane.spec.network.vpc.cidrBlock %v", r.Spec.NetworkSpec.VPC.CidrBlock))) + } + } + if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.CidrBlock != "" && r.Spec.NetworkSpec.VPC.IPv6.PoolID == "" { - poolField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "poolId") + poolField := 
field.NewPath("spec", "network", "vpc", "ipv6", "poolId") allErrs = append(allErrs, field.Invalid(poolField, r.Spec.NetworkSpec.VPC.IPv6.PoolID, "poolId cannot be empty if cidrBlock is set")) } if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.PoolID != "" && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool != nil { - poolField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "poolId") + poolField := field.NewPath("spec", "network", "vpc", "ipv6", "poolId") allErrs = append(allErrs, field.Invalid(poolField, r.Spec.NetworkSpec.VPC.IPv6.PoolID, "poolId and ipamPool cannot be used together")) } if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.CidrBlock != "" && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool != nil { - cidrBlockField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "cidrBlock") + cidrBlockField := field.NewPath("spec", "network", "vpc", "ipv6", "cidrBlock") allErrs = append(allErrs, field.Invalid(cidrBlockField, r.Spec.NetworkSpec.VPC.IPv6.CidrBlock, "cidrBlock and ipamPool cannot be used together")) } if r.Spec.NetworkSpec.VPC.IsIPv6Enabled() && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool != nil && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool.ID == "" && r.Spec.NetworkSpec.VPC.IPv6.IPAMPool.Name == "" { - ipamPoolField := field.NewPath("spec", "networkSpec", "vpc", "ipv6", "ipamPool") + ipamPoolField := field.NewPath("spec", "network", "vpc", "ipv6", "ipamPool") allErrs = append(allErrs, field.Invalid(ipamPoolField, r.Spec.NetworkSpec.VPC.IPv6.IPAMPool, "ipamPool must have either id or name")) } diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go index bc3cd5d086..b6aab4d3a8 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go @@ -167,15 +167,17 @@ func TestDefaultingWebhook(t *testing.T) { func TestWebhookCreate(t *testing.T) { tests := []struct { //nolint:maligned - name string - eksClusterName string - expectError bool - eksVersion string - hasAddons bool - vpcCNI VpcCni - additionalTags infrav1.Tags - secondaryCidr *string - kubeProxy KubeProxy + name string + eksClusterName string + expectError bool + expectErrorToContain string // if non-empty, the error message must contain this substring + eksVersion string + hasAddons bool + vpcCNI VpcCni + additionalTags infrav1.Tags + secondaryCidr *string + secondaryCidrBlocks []infrav1.VpcCidrBlock + kubeProxy KubeProxy }{ { name: "ekscluster specified", @@ -253,6 +255,15 @@ func TestWebhookCreate(t *testing.T) { vpcCNI: VpcCni{Disable: true}, secondaryCidr: aws.String("100.64.0.0/10"), }, + { + name: "secondary CIDR block not listed in NetworkSpec.VPC.SecondaryCidrBlocks", + eksClusterName: "default_cluster1", + eksVersion: "v1.19", + expectError: true, + expectErrorToContain: "100.64.0.0/16 must be listed in AWSManagedControlPlane.spec.network.vpc.secondaryCidrBlocks", + secondaryCidr: aws.String("100.64.0.0/16"), + secondaryCidrBlocks: []infrav1.VpcCidrBlock{{IPv4CidrBlock: "123.456.0.0/16"}}, + }, { name: "invalid tags not allowed", eksClusterName: "default_cluster1", @@ -327,6 +338,11 @@ func TestWebhookCreate(t *testing.T) { KubeProxy: tc.kubeProxy, AdditionalTags: tc.additionalTags, VpcCni: tc.vpcCNI, + NetworkSpec: infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + SecondaryCidrBlocks: tc.secondaryCidrBlocks, + }, + }, }, } if tc.eksVersion != "" { @@ -353,7 +369,16 @@ func TestWebhookCreate(t *testing.T) { 
if tc.expectError { g.Expect(err).ToNot(BeNil()) + + if tc.expectErrorToContain != "" && err != nil { + g.Expect(err.Error()).To(ContainSubstring(tc.expectErrorToContain)) + } } else { + if tc.expectErrorToContain != "" { + t.Error("Logic error: expectError=false means that expectErrorToContain must be empty") + t.FailNow() + } + g.Expect(err).To(BeNil()) } }) diff --git a/pkg/cloud/scope/cluster.go b/pkg/cloud/scope/cluster.go index aa988f8825..9efaa16d7a 100644 --- a/pkg/cloud/scope/cluster.go +++ b/pkg/cloud/scope/cluster.go @@ -153,6 +153,17 @@ func (s *ClusterScope) SecondaryCidrBlock() *string { return nil } +// SecondaryCidrBlocks returns the additional CIDR blocks to be associated with the managed VPC. +func (s *ClusterScope) SecondaryCidrBlocks() []infrav1.VpcCidrBlock { + return s.AWSCluster.Spec.NetworkSpec.VPC.SecondaryCidrBlocks +} + +// AllSecondaryCidrBlocks returns all secondary CIDR blocks (combining `SecondaryCidrBlock` and `SecondaryCidrBlocks`). +func (s *ClusterScope) AllSecondaryCidrBlocks() []infrav1.VpcCidrBlock { + // Non-EKS clusters don't have anything in `SecondaryCidrBlock()` + return s.SecondaryCidrBlocks() +} + // Name returns the CAPI cluster name. func (s *ClusterScope) Name() string { return s.Cluster.Name diff --git a/pkg/cloud/scope/managedcontrolplane.go b/pkg/cloud/scope/managedcontrolplane.go index 018cbc7781..3ad342e6b6 100644 --- a/pkg/cloud/scope/managedcontrolplane.go +++ b/pkg/cloud/scope/managedcontrolplane.go @@ -208,6 +208,28 @@ func (s *ManagedControlPlaneScope) SecondaryCidrBlock() *string { return s.ControlPlane.Spec.SecondaryCidrBlock } +// SecondaryCidrBlocks returns the additional CIDR blocks to be associated with the managed VPC. +func (s *ManagedControlPlaneScope) SecondaryCidrBlocks() []infrav1.VpcCidrBlock { + return s.ControlPlane.Spec.NetworkSpec.VPC.SecondaryCidrBlocks +} + +// AllSecondaryCidrBlocks returns all secondary CIDR blocks (combining `SecondaryCidrBlock` and `SecondaryCidrBlocks`). +func (s *ManagedControlPlaneScope) AllSecondaryCidrBlocks() []infrav1.VpcCidrBlock { + secondaryCidrBlocks := s.ControlPlane.Spec.NetworkSpec.VPC.SecondaryCidrBlocks + + // If only `AWSManagedControlPlane.spec.secondaryCidrBlock` is set, no additional checks are done to remain + // backward-compatible. The `VPCSpec.SecondaryCidrBlocks` field was added later - if that list is not empty, we + // require `AWSManagedControlPlane.spec.secondaryCidrBlock` to be listed in there as well (validation done in + // webhook). + if s.ControlPlane.Spec.SecondaryCidrBlock != nil && len(secondaryCidrBlocks) == 0 { + secondaryCidrBlocks = []infrav1.VpcCidrBlock{{ + IPv4CidrBlock: *s.ControlPlane.Spec.SecondaryCidrBlock, + }} + } + + return secondaryCidrBlocks +} + // SecurityGroupOverrides returns the security groups that are overrides in the ControlPlane spec. func (s *ManagedControlPlaneScope) SecurityGroupOverrides() map[infrav1.SecurityGroupRole]string { return s.ControlPlane.Spec.NetworkSpec.SecurityGroupOverrides diff --git a/pkg/cloud/scope/network.go b/pkg/cloud/scope/network.go index 32b02ca0d2..aeb0e34231 100644 --- a/pkg/cloud/scope/network.go +++ b/pkg/cloud/scope/network.go @@ -37,8 +37,14 @@ type NetworkScope interface { CNIIngressRules() infrav1.CNIIngressRules // SecurityGroups returns the cluster security groups as a map, it creates the map if empty. 
SecurityGroups() map[infrav1.SecurityGroupRole]infrav1.SecurityGroup - // SecondaryCidrBlock returns the optional secondary CIDR block to use for pod IPs + // SecondaryCidrBlock returns the optional secondary CIDR block to use for pod IPs. This may later be renamed since + // it should not be confused with SecondaryCidrBlocks. SecondaryCidrBlock() *string + // SecondaryCidrBlocks returns the additional CIDR blocks to be associated with the managed VPC. + SecondaryCidrBlocks() []infrav1.VpcCidrBlock + // AllSecondaryCidrBlocks returns a unique list of all secondary CIDR blocks (combining `SecondaryCidrBlock` and + // `SecondaryCidrBlocks`). + AllSecondaryCidrBlocks() []infrav1.VpcCidrBlock // Bastion returns the bastion details for the cluster. Bastion() *infrav1.Bastion diff --git a/pkg/cloud/scope/shared.go b/pkg/cloud/scope/shared.go index 865ebfaf52..2521f100de 100644 --- a/pkg/cloud/scope/shared.go +++ b/pkg/cloud/scope/shared.go @@ -100,7 +100,7 @@ func (p *defaultSubnetPlacementStrategy) Place(input *placementInput) ([]string, return subnetIDs, nil } - controlPlaneSubnetIDs := input.ControlplaneSubnets.FilterPrivate().IDs() + controlPlaneSubnetIDs := input.ControlplaneSubnets.FilterPrivate().FilterNonCni().IDs() if len(controlPlaneSubnetIDs) > 0 { p.logger.Debug("using all the private subnets from the control plane") return controlPlaneSubnetIDs, nil @@ -119,9 +119,9 @@ func (p *defaultSubnetPlacementStrategy) getSubnetsForAZs(azs []string, controlP case expinfrav1.AZSubnetTypeAll: // no-op case expinfrav1.AZSubnetTypePublic: - subnets = subnets.FilterPublic() + subnets = subnets.FilterPublic().FilterNonCni() case expinfrav1.AZSubnetTypePrivate: - subnets = subnets.FilterPrivate() + subnets = subnets.FilterPrivate().FilterNonCni() } } if len(subnets) == 0 { diff --git a/pkg/cloud/scope/shared_test.go b/pkg/cloud/scope/shared_test.go index 34d124abf3..8afc051c6c 100644 --- a/pkg/cloud/scope/shared_test.go +++ b/pkg/cloud/scope/shared_test.go @@ -182,6 +182,14 @@ func TestSubnetPlacement(t *testing.T) { AvailabilityZone: "eu-west-1c", IsPublic: false, }, + infrav1.SubnetSpec{ + ID: "subnet-az6", + AvailabilityZone: "eu-west-1c", + IsPublic: false, + Tags: infrav1.Tags{ + infrav1.NameAWSSubnetAssociation: infrav1.SecondarySubnetTagValue, + }, + }, }, logger: logger.NewLogger(klog.Background()), expectedSubnetIDs: []string{"subnet-az3"}, diff --git a/pkg/cloud/services/autoscaling/autoscalinggroup.go b/pkg/cloud/services/autoscaling/autoscalinggroup.go index 9ddd4c086d..d473010d12 100644 --- a/pkg/cloud/services/autoscaling/autoscalinggroup.go +++ b/pkg/cloud/services/autoscaling/autoscalinggroup.go @@ -542,6 +542,12 @@ func (s *Service) SubnetIDs(scope *scope.MachinePoolScope) ([]string, error) { } for _, subnet := range out.Subnets { + tags := converters.TagsToMap(subnet.Tags) + if tags[infrav1.NameAWSSubnetAssociation] == infrav1.SecondarySubnetTagValue { + // Subnet belongs to a secondary CIDR block which won't be used to create instances + continue + } + subnetIDs = append(subnetIDs, *subnet.SubnetId) } diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go index b943a493ba..0f9e297aeb 100644 --- a/pkg/cloud/services/ec2/instances.go +++ b/pkg/cloud/services/ec2/instances.go @@ -356,6 +356,13 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { errMessage += fmt.Sprintf(" subnet %q is a private subnet.", *subnet.SubnetId) continue } + + tags := converters.TagsToMap(subnet.Tags) + if tags[infrav1.NameAWSSubnetAssociation] == 
infrav1.SecondarySubnetTagValue { + errMessage += fmt.Sprintf(" subnet %q belongs to a secondary CIDR block which won't be used to create instances.", *subnet.SubnetId) + continue + } + filtered = append(filtered, subnet) } // prefer a subnet in the cluster VPC if multiple match @@ -372,7 +379,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { return *filtered[0].SubnetId, nil case failureDomain != nil: if scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP { - subnets := s.scope.Subnets().FilterPublic().FilterByZone(*failureDomain) + subnets := s.scope.Subnets().FilterPublic().FilterNonCni().FilterByZone(*failureDomain) if len(subnets) == 0 { errMessage := fmt.Sprintf("failed to run machine %q with public IP, no public subnets available in availability zone %q", scope.Name(), *failureDomain) @@ -382,7 +389,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { return subnets[0].GetResourceID(), nil } - subnets := s.scope.Subnets().FilterPrivate().FilterByZone(*failureDomain) + subnets := s.scope.Subnets().FilterPrivate().FilterNonCni().FilterByZone(*failureDomain) if len(subnets) == 0 { errMessage := fmt.Sprintf("failed to run machine %q, no subnets available in availability zone %q", scope.Name(), *failureDomain) @@ -391,7 +398,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { } return subnets[0].GetResourceID(), nil case scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP: - subnets := s.scope.Subnets().FilterPublic() + subnets := s.scope.Subnets().FilterPublic().FilterNonCni() if len(subnets) == 0 { errMessage := fmt.Sprintf("failed to run machine %q with public IP, no public subnets available", scope.Name()) record.Eventf(scope.AWSMachine, "FailedCreate", errMessage) @@ -403,7 +410,7 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { // with control plane machines. default: - sns := s.scope.Subnets().FilterPrivate() + sns := s.scope.Subnets().FilterPrivate().FilterNonCni() if len(sns) == 0 { errMessage := fmt.Sprintf("failed to run machine %q, no subnets available", scope.Name()) record.Eventf(s.scope.InfraCluster(), "FailedCreateInstance", errMessage) diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go index 80ad95456c..aa77c76495 100644 --- a/pkg/cloud/services/elb/loadbalancer.go +++ b/pkg/cloud/services/elb/loadbalancer.go @@ -262,10 +262,10 @@ func (s *Service) getAPIServerLBSpec(elbName string, lbSpec *infrav1.AWSLoadBala } } else { // The load balancer APIs require us to only attach one subnet for each AZ. - subnets := s.scope.Subnets().FilterPrivate() + subnets := s.scope.Subnets().FilterPrivate().FilterNonCni() if scheme == infrav1.ELBSchemeInternetFacing { - subnets = s.scope.Subnets().FilterPublic() + subnets = s.scope.Subnets().FilterPublic().FilterNonCni() } subnetLoop: @@ -1074,10 +1074,10 @@ func (s *Service) getAPIServerClassicELBSpec(elbName string) (*infrav1.LoadBalan } } else { // The load balancer APIs require us to only attach one subnet for each AZ. 
- subnets := s.scope.Subnets().FilterPrivate() + subnets := s.scope.Subnets().FilterPrivate().FilterNonCni() if scheme == infrav1.ELBSchemeInternetFacing { - subnets = s.scope.Subnets().FilterPublic() + subnets = s.scope.Subnets().FilterPublic().FilterNonCni() } subnetLoop: diff --git a/pkg/cloud/services/network/natgateways.go b/pkg/cloud/services/network/natgateways.go index 807594f604..b2bfe8333e 100644 --- a/pkg/cloud/services/network/natgateways.go +++ b/pkg/cloud/services/network/natgateways.go @@ -45,7 +45,7 @@ func (s *Service) reconcileNatGateways() error { s.scope.Debug("Reconciling NAT gateways") - if len(s.scope.Subnets().FilterPrivate()) == 0 { + if len(s.scope.Subnets().FilterPrivate().FilterNonCni()) == 0 { s.scope.Debug("No private subnets available, skipping NAT gateways") conditions.MarkFalse( s.scope.InfraCluster(), @@ -54,7 +54,7 @@ func (s *Service) reconcileNatGateways() error { clusterv1.ConditionSeverityWarning, "No private subnets available, skipping NAT gateways") return nil - } else if len(s.scope.Subnets().FilterPublic()) == 0 { + } else if len(s.scope.Subnets().FilterPublic().FilterNonCni()) == 0 { s.scope.Debug("No public subnets available. Cannot create NAT gateways for private subnets, this might be a configuration error.") conditions.MarkFalse( s.scope.InfraCluster(), @@ -73,7 +73,7 @@ func (s *Service) reconcileNatGateways() error { natGatewaysIPs := []string{} subnetIDs := []string{} - for _, sn := range s.scope.Subnets().FilterPublic() { + for _, sn := range s.scope.Subnets().FilterPublic().FilterNonCni() { if sn.GetResourceID() == "" { continue } diff --git a/pkg/cloud/services/network/network.go b/pkg/cloud/services/network/network.go index b2363b5aac..af2ef7aaf7 100644 --- a/pkg/cloud/services/network/network.go +++ b/pkg/cloud/services/network/network.go @@ -37,8 +37,8 @@ func (s *Service) ReconcileNetwork() (err error) { } conditions.MarkTrue(s.scope.InfraCluster(), infrav1.VpcReadyCondition) - // Secondary CIDR - if err := s.associateSecondaryCidr(); err != nil { + // Secondary CIDRs + if err := s.associateSecondaryCidrs(); err != nil { conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error()) return err } @@ -184,7 +184,7 @@ func (s *Service) DeleteNetwork() (err error) { // Secondary CIDR. 
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") - if err := s.disassociateSecondaryCidr(); err != nil { + if err := s.disassociateSecondaryCidrs(); err != nil { conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, "DisassociateFailed", clusterv1.ConditionSeverityWarning, err.Error()) return err } diff --git a/pkg/cloud/services/network/secondarycidr.go b/pkg/cloud/services/network/secondarycidr.go index 54fb7c5816..829383bf1a 100644 --- a/pkg/cloud/services/network/secondarycidr.go +++ b/pkg/cloud/services/network/secondarycidr.go @@ -20,7 +20,6 @@ import ( "context" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/google/go-cmp/cmp" "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" @@ -30,8 +29,9 @@ func isVPCPresent(vpcs *ec2.DescribeVpcsOutput) bool { return vpcs != nil && len(vpcs.Vpcs) > 0 } -func (s *Service) associateSecondaryCidr() error { - if s.scope.SecondaryCidrBlock() == nil { +func (s *Service) associateSecondaryCidrs() error { + secondaryCidrBlocks := s.scope.AllSecondaryCidrBlocks() + if len(secondaryCidrBlocks) == 0 { return nil } @@ -46,30 +46,43 @@ func (s *Service) associateSecondaryCidr() error { return errors.Errorf("failed to associateSecondaryCidr as there are no VPCs present") } + // We currently only *add* associations. Here, we do not reconcile exactly against the provided list + // (i.e. disassociate what isn't listed in the spec). existingAssociations := vpcs.Vpcs[0].CidrBlockAssociationSet - for _, existing := range existingAssociations { - if *existing.CidrBlock == *s.scope.SecondaryCidrBlock() { - return nil + for _, desiredCidrBlock := range secondaryCidrBlocks { + desiredCidrBlock := desiredCidrBlock + + found := false + for _, existing := range existingAssociations { + if *existing.CidrBlock == desiredCidrBlock.IPv4CidrBlock { + found = true + break + } + } + if found { + continue } - } - out, err := s.EC2Client.AssociateVpcCidrBlockWithContext(context.TODO(), &ec2.AssociateVpcCidrBlockInput{ - VpcId: &s.scope.VPC().ID, - CidrBlock: s.scope.SecondaryCidrBlock(), - }) - if err != nil { - record.Warnf(s.scope.InfraCluster(), "FailedAssociateSecondaryCidr", "Failed associating secondary CIDR with VPC %v", err) - return err - } + out, err := s.EC2Client.AssociateVpcCidrBlockWithContext(context.TODO(), &ec2.AssociateVpcCidrBlockInput{ + VpcId: &s.scope.VPC().ID, + CidrBlock: &desiredCidrBlock.IPv4CidrBlock, + }) + if err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedAssociateSecondaryCidr", "Failed associating secondary CIDR %q with VPC %v", desiredCidrBlock.IPv4CidrBlock, err) + return err + } - // once IPv6 is supported, we need to modify out.CidrBlockAssociation.AssociationId to out.Ipv6CidrBlockAssociation.AssociationId - record.Eventf(s.scope.InfraCluster(), "SuccessfulAssociateSecondaryCidr", "Associated secondary CIDR with VPC %q", *out.CidrBlockAssociation.AssociationId) + // Once IPv6 is supported, we need to consider both `out.CidrBlockAssociation.AssociationId` and + // `out.Ipv6CidrBlockAssociation.AssociationId` + record.Eventf(s.scope.InfraCluster(), "SuccessfulAssociateSecondaryCidr", "Associated secondary CIDR %q with VPC %q", desiredCidrBlock.IPv4CidrBlock, *out.CidrBlockAssociation.AssociationId) + } return nil } -func (s *Service) disassociateSecondaryCidr() error { - if s.scope.SecondaryCidrBlock() == nil { +func (s *Service) disassociateSecondaryCidrs() error { + 
secondaryCidrBlocks := s.scope.AllSecondaryCidrBlocks() + if len(secondaryCidrBlocks) == 0 { return nil } @@ -81,17 +94,20 @@ func (s *Service) disassociateSecondaryCidr() error { } if !isVPCPresent(vpcs) { - return errors.Errorf("failed to associateSecondaryCidr as there are no VPCs present") + return errors.Errorf("failed to disassociateSecondaryCidr as there are no VPCs present") } existingAssociations := vpcs.Vpcs[0].CidrBlockAssociationSet - for _, existing := range existingAssociations { - if cmp.Equal(existing.CidrBlock, s.scope.SecondaryCidrBlock()) { - if _, err := s.EC2Client.DisassociateVpcCidrBlockWithContext(context.TODO(), &ec2.DisassociateVpcCidrBlockInput{ - AssociationId: existing.AssociationId, - }); err != nil { - record.Warnf(s.scope.InfraCluster(), "FailedDisassociateSecondaryCidr", "Failed disassociating secondary CIDR with VPC %v", err) - return err + for _, cidrBlockToDelete := range secondaryCidrBlocks { + for _, existing := range existingAssociations { + if *existing.CidrBlock == cidrBlockToDelete.IPv4CidrBlock { + if _, err := s.EC2Client.DisassociateVpcCidrBlockWithContext(context.TODO(), &ec2.DisassociateVpcCidrBlockInput{ + AssociationId: existing.AssociationId, + }); err != nil { + record.Warnf(s.scope.InfraCluster(), "FailedDisassociateSecondaryCidr", "Failed disassociating secondary CIDR %q from VPC %v", cidrBlockToDelete.IPv4CidrBlock, err) + return err + } + break } } } diff --git a/pkg/cloud/services/network/secondarycidr_test.go b/pkg/cloud/services/network/secondarycidr_test.go index 5be6cf441e..ee5ea3f6f2 100644 --- a/pkg/cloud/services/network/secondarycidr_test.go +++ b/pkg/cloud/services/network/secondarycidr_test.go @@ -65,25 +65,33 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { defer mockCtrl.Finish() tests := []struct { - name string - haveSecondaryCIDR bool - expect func(m *mocks.MockEC2APIMockRecorder) - wantErr bool + name string + fillAWSManagedControlPlaneSecondaryCIDR bool + networkSecondaryCIDRBlocks []infrav1.VpcCidrBlock + expect func(m *mocks.MockEC2APIMockRecorder) + wantErr bool }{ { - name: "Should not associate secondary CIDR if no secondary cidr block info present in control plane", + name: "Should not associate secondary CIDR if no secondary cidr block info present in control plane", + fillAWSManagedControlPlaneSecondaryCIDR: false, + expect: func(m *mocks.MockEC2APIMockRecorder) { + // No calls expected + m.DescribeVpcsWithContext(context.TODO(), gomock.Any()).Times(0) + m.AssociateVpcCidrBlockWithContext(context.TODO(), gomock.Any()).Times(0) + }, + wantErr: false, }, { - name: "Should return error if unable to describe VPC", - haveSecondaryCIDR: true, + name: "Should return error if unable to describe VPC", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure")) }, wantErr: true, }, { - name: "Should not associate secondary cidr block if already exist in VPC", - haveSecondaryCIDR: true, + name: "Should not associate secondary cidr block if already exist in VPC", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ Vpcs: []*ec2.Vpc{ @@ -96,29 +104,102 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { }, }, { - name: "Should return error if no 
VPC found", - haveSecondaryCIDR: true, + name: "Should return error if no VPC found", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { - m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, nil) + m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{}, + }, nil) }, wantErr: true, }, { - name: "Should return error if failed during associating secondary cidr block", - haveSecondaryCIDR: true, + name: "Should return error if failed during associating secondary cidr block", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ Vpcs: []*ec2.Vpc{ { - CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{ - {CidrBlock: aws.String("secondary-cidr-new")}, - }, + CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{}, }, }}, nil) m.AssociateVpcCidrBlockWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AssociateVpcCidrBlockInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure")) }, wantErr: true, }, + { + name: "Should successfully associate secondary CIDR block if none is associated yet", + fillAWSManagedControlPlaneSecondaryCIDR: true, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{ + { + CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{}, + }, + }}, nil) + m.AssociateVpcCidrBlockWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.AssociateVpcCidrBlockInput{})).Return(&ec2.AssociateVpcCidrBlockOutput{ + CidrBlockAssociation: &ec2.VpcCidrBlockAssociation{ + AssociationId: ptr.To[string]("association-id-success"), + }, + }, nil) + }, + wantErr: false, + }, + { + name: "Should successfully associate missing secondary CIDR blocks", + fillAWSManagedControlPlaneSecondaryCIDR: false, + networkSecondaryCIDRBlocks: []infrav1.VpcCidrBlock{ + { + IPv4CidrBlock: "10.0.1.0/24", + }, + { + IPv4CidrBlock: "10.0.2.0/24", + }, + { + IPv4CidrBlock: "10.0.3.0/24", + }, + { + IPv4CidrBlock: "10.0.4.0/24", + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + // Two are simulated to exist... 
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{ + { + CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{ + { + AssociationId: ptr.To[string]("association-id-existing-1"), + CidrBlock: ptr.To[string]("10.0.1.0/24"), + }, + { + AssociationId: ptr.To[string]("association-id-existing-3"), + CidrBlock: ptr.To[string]("10.0.3.0/24"), + }, + }, + }, + }}, nil) + + // ...the other two should be created + m.AssociateVpcCidrBlockWithContext(context.TODO(), gomock.Eq(&ec2.AssociateVpcCidrBlockInput{ + CidrBlock: ptr.To[string]("10.0.2.0/24"), + VpcId: ptr.To[string]("vpc-id"), + })).Return(&ec2.AssociateVpcCidrBlockOutput{ + CidrBlockAssociation: &ec2.VpcCidrBlockAssociation{ + AssociationId: ptr.To[string]("association-id-success-2"), + }, + }, nil) + m.AssociateVpcCidrBlockWithContext(context.TODO(), gomock.Eq(&ec2.AssociateVpcCidrBlockInput{ + CidrBlock: ptr.To[string]("10.0.4.0/24"), + VpcId: ptr.To[string]("vpc-id"), + })).Return(&ec2.AssociateVpcCidrBlockOutput{ + CidrBlockAssociation: &ec2.VpcCidrBlockAssociation{ + AssociationId: ptr.To[string]("association-id-success-4"), + }, + }, nil) + }, + wantErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -133,9 +214,10 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { mcpScope, err := setupNewManagedControlPlaneScope(cl) g.Expect(err).NotTo(HaveOccurred()) - if !tt.haveSecondaryCIDR { + if !tt.fillAWSManagedControlPlaneSecondaryCIDR { mcpScope.ControlPlane.Spec.SecondaryCidrBlock = nil } + mcpScope.ControlPlane.Spec.NetworkSpec.VPC.SecondaryCidrBlocks = tt.networkSecondaryCIDRBlocks s := NewService(mcpScope) s.EC2Client = ec2Mock @@ -144,7 +226,7 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { tt.expect(ec2Mock.EXPECT()) } - err = s.associateSecondaryCidr() + err = s.associateSecondaryCidrs() if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -159,33 +241,41 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { defer mockCtrl.Finish() tests := []struct { - name string - haveSecondaryCIDR bool - expect func(m *mocks.MockEC2APIMockRecorder) - wantErr bool + name string + fillAWSManagedControlPlaneSecondaryCIDR bool + networkSecondaryCIDRBlocks []infrav1.VpcCidrBlock + expect func(m *mocks.MockEC2APIMockRecorder) + wantErr bool }{ { - name: "Should not disassociate secondary CIDR if no secondary cidr block info present in control plane", + name: "Should not disassociate secondary CIDR if no secondary cidr block info present in control plane", + fillAWSManagedControlPlaneSecondaryCIDR: false, + expect: func(m *mocks.MockEC2APIMockRecorder) { + // No calls expected + m.DescribeVpcsWithContext(context.TODO(), gomock.Any()).Times(0) + m.DisassociateVpcCidrBlockWithContext(context.TODO(), gomock.Any()).Times(0) + }, + wantErr: false, }, { - name: "Should return error if unable to describe VPC", - haveSecondaryCIDR: true, + name: "Should return error if unable to describe VPC", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure")) }, wantErr: true, }, { - name: "Should return error if no VPC found", - haveSecondaryCIDR: true, + name: "Should return error if no VPC found", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { 
m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(nil, nil) }, wantErr: true, }, { - name: "Should diassociate secondary cidr block if already exist in VPC", - haveSecondaryCIDR: true, + name: "Should diassociate secondary cidr block if already exist in VPC", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ Vpcs: []*ec2.Vpc{ @@ -199,8 +289,8 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { }, }, { - name: "Should return error if failed to diassociate secondary cidr block", - haveSecondaryCIDR: true, + name: "Should return error if failed to diassociate secondary cidr block", + fillAWSManagedControlPlaneSecondaryCIDR: true, expect: func(m *mocks.MockEC2APIMockRecorder) { m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ Vpcs: []*ec2.Vpc{ @@ -214,6 +304,66 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { }, wantErr: true, }, + { + name: "Should successfully return from disassociating secondary CIDR blocks if none is currently associated", + fillAWSManagedControlPlaneSecondaryCIDR: true, + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{ + { + CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{}, + }, + }}, nil) + + // No calls expected + m.DisassociateVpcCidrBlockWithContext(context.TODO(), gomock.Any()).Times(0) + }, + wantErr: false, + }, + { + name: "Should successfully disassociate existing secondary CIDR blocks", + fillAWSManagedControlPlaneSecondaryCIDR: false, + networkSecondaryCIDRBlocks: []infrav1.VpcCidrBlock{ + { + IPv4CidrBlock: "10.0.1.0/24", + }, + { + IPv4CidrBlock: "10.0.2.0/24", + }, + { + IPv4CidrBlock: "10.0.3.0/24", + }, + { + IPv4CidrBlock: "10.0.4.0/24", + }, + }, + expect: func(m *mocks.MockEC2APIMockRecorder) { + // Two are simulated to exist... 
+ m.DescribeVpcsWithContext(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeVpcsInput{})).Return(&ec2.DescribeVpcsOutput{ + Vpcs: []*ec2.Vpc{ + { + CidrBlockAssociationSet: []*ec2.VpcCidrBlockAssociation{ + { + AssociationId: ptr.To[string]("association-id-existing-1"), + CidrBlock: ptr.To[string]("10.0.1.0/24"), + }, + { + AssociationId: ptr.To[string]("association-id-existing-3"), + CidrBlock: ptr.To[string]("10.0.3.0/24"), + }, + }, + }, + }}, nil) + + m.DisassociateVpcCidrBlockWithContext(context.TODO(), gomock.Eq(&ec2.DisassociateVpcCidrBlockInput{ + AssociationId: ptr.To[string]("association-id-existing-1"), // 10.0.1.0/24 (see above) + })).Return(&ec2.DisassociateVpcCidrBlockOutput{}, nil) + m.DisassociateVpcCidrBlockWithContext(context.TODO(), gomock.Eq(&ec2.DisassociateVpcCidrBlockInput{ + AssociationId: ptr.To[string]("association-id-existing-3"), // 10.0.3.0/24 (see above) + })).Return(&ec2.DisassociateVpcCidrBlockOutput{}, nil) + }, + wantErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -228,9 +378,10 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { mcpScope, err := setupNewManagedControlPlaneScope(cl) g.Expect(err).NotTo(HaveOccurred()) - if !tt.haveSecondaryCIDR { + if !tt.fillAWSManagedControlPlaneSecondaryCIDR { mcpScope.ControlPlane.Spec.SecondaryCidrBlock = nil } + mcpScope.ControlPlane.Spec.NetworkSpec.VPC.SecondaryCidrBlocks = tt.networkSecondaryCIDRBlocks s := NewService(mcpScope) s.EC2Client = ec2Mock @@ -239,7 +390,7 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { tt.expect(ec2Mock.EXPECT()) } - err = s.disassociateSecondaryCidr() + err = s.disassociateSecondaryCidrs() if tt.wantErr { g.Expect(err).To(HaveOccurred()) return
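Editor's note: below is a minimal, standalone Go sketch of the merge semantics this patch introduces for secondary CIDR blocks. The type and function here are simplified stand-ins for illustration only (they are not part of the patch); the real logic lives in ManagedControlPlaneScope.AllSecondaryCidrBlocks in pkg/cloud/scope/managedcontrolplane.go, and the consistency rule between the two spec fields is enforced by the AWSManagedControlPlane webhook shown above.

package main

import "fmt"

// VpcCidrBlock is a simplified stand-in for infrav1.VpcCidrBlock, for illustration only.
type VpcCidrBlock struct {
	IPv4CidrBlock string
}

// allSecondaryCidrBlocks mirrors the merge semantics of
// ManagedControlPlaneScope.AllSecondaryCidrBlocks in this patch: the legacy
// AWSManagedControlPlane.spec.secondaryCidrBlock is folded into the result only
// when spec.network.vpc.secondaryCidrBlocks is empty; otherwise the explicit
// list is used as-is (and the webhook requires the legacy value to be listed in it).
func allSecondaryCidrBlocks(legacySecondaryCidrBlock *string, secondaryCidrBlocks []VpcCidrBlock) []VpcCidrBlock {
	if legacySecondaryCidrBlock != nil && len(secondaryCidrBlocks) == 0 {
		return []VpcCidrBlock{{IPv4CidrBlock: *legacySecondaryCidrBlock}}
	}
	return secondaryCidrBlocks
}

func main() {
	legacy := "100.64.0.0/16"

	// Only the legacy field is set: it becomes the single secondary CIDR block.
	fmt.Println(allSecondaryCidrBlocks(&legacy, nil))

	// Both fields are set: the explicit list wins (and must contain the legacy value).
	fmt.Println(allSecondaryCidrBlocks(&legacy, []VpcCidrBlock{
		{IPv4CidrBlock: "100.64.0.0/16"},
		{IPv4CidrBlock: "10.1.0.0/16"},
	}))
}

Keeping the legacy field outside the new list unless the list is empty preserves backward compatibility for existing EKS clusters, while the webhook validation makes the two fields consistent whenever both are populated, which may allow merging them later on.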