Commit

Merge pull request #4648 from cnmcavoy/eks-cluster-autoscaler-secret
✨ Add separate eks kubeconfig secret keys for the cluster-autoscaler
k8s-ci-robot authored Jul 22, 2024
2 parents 2a4d434 + f3d1caa commit c3c5a53
Showing 4 changed files with 348 additions and 14 deletions.
10 changes: 9 additions & 1 deletion docs/book/src/topics/eks/creating-a-cluster.md
@@ -34,4 +34,12 @@ kubectl --namespace=default get secret managed-test-user-kubeconfig \

This kubeconfig is used internally by CAPI and shouldn't be used outside of the management server. It is used by CAPI to perform operations, such as draining a node. The name of the secret that contains the kubeconfig will be `[cluster-name]-kubeconfig` where you need to replace **[cluster-name]** with the name of your cluster. Note that there is NO `-user` in the name.

The kubeconfig is regenerated every `sync-period` as the token that is embedded in the kubeconfig is only valid for a short period of time. When EKS support is enabled the maximum sync period is 10 minutes. If you try to set `--sync-period` to greater than 10 minutes then an error will be raised.
There are three keys in the CAPI kubeconfig secret for EKS clusters:

| Key         | Purpose                                                                                                                                                              |
|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| value       | contains a complete kubeconfig with the cluster admin user and its token embedded                                                                                      |
| relative    | contains a kubeconfig with the cluster admin user that references the token file by a relative path; assumes all the secret keys are mounted in the same directory (see the sketch below) |
| token-file  | contains the same token that is embedded in the complete kubeconfig; keeping it in a separate key lets existing apimachinery clients reload the token when the secret is updated |

The secret contents are regenerated every `sync-period`, as the token embedded in the kubeconfig and token file is only valid for a short period of time. When EKS support is enabled, the maximum sync period is 10 minutes; setting `--sync-period` greater than 10 minutes raises an error.
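
A minimal sketch of how a consumer such as the cluster-autoscaler could load these keys, assuming they are all mounted at the hypothetical path `/etc/kubernetes/capi-kubeconfig` (the path and snippet are illustrative). client-go resolves the `./token-file` reference in the `relative` kubeconfig against the kubeconfig's own directory, so rotated tokens can be picked up without restarting the process:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical mount point: every key of the [cluster-name]-kubeconfig
	// secret projected into one directory, so the "./token-file" reference
	// inside the "relative" kubeconfig resolves to the sibling token file.
	const mountDir = "/etc/kubernetes/capi-kubeconfig"

	// Build a rest.Config from the "relative" kubeconfig key.
	cfg, err := clientcmd.BuildConfigFromFlags("", mountDir+"/relative")
	if err != nil {
		panic(err)
	}

	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Any authenticated call works; listing nodes is a simple smoke test.
	nodes, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("workload cluster has %d nodes\n", len(nodes.Items))
}
```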
83 changes: 70 additions & 13 deletions pkg/cloud/services/eks/config.go
Expand Up @@ -31,9 +31,12 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
"sigs.k8s.io/controller-runtime/pkg/client"

ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/kubeconfig"
"sigs.k8s.io/cluster-api/util/secret"
)
@@ -42,6 +45,9 @@ const (
tokenPrefix = "k8s-aws-v1." //nolint:gosec
clusterNameHeader = "x-k8s-aws-id"
tokenAgeMins = 15

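// Secret keys added alongside the standard kubeconfig "value" key: the
// kubeconfig stored under relativeKubeconfigKey references the token stored
// under relativeTokenFileKey by a relative path, so both keys must be
// mounted into the same directory.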
relativeKubeconfigKey = "relative"
relativeTokenFileKey = "token-file"
)

func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *eks.Cluster) error {
@@ -110,28 +116,44 @@ func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *eks.C
clusterName := s.scope.KubernetesClusterName()
userName := s.getKubeConfigUserName(clusterName, false)

cfg, err := s.createBaseKubeConfig(cluster, userName)
config, err := s.createBaseKubeConfig(cluster, userName)
if err != nil {
return fmt.Errorf("creating base kubeconfig: %w", err)
}
clusterConfig := config.DeepCopy()

token, err := s.generateToken()
if err != nil {
return fmt.Errorf("generating presigned token: %w", err)
}

cfg.AuthInfos = map[string]*api.AuthInfo{
clusterConfig.AuthInfos = map[string]*api.AuthInfo{
userName: {
Token: token,
},
}

out, err := clientcmd.Write(*cfg)
out, err := clientcmd.Write(*clusterConfig)
if err != nil {
return errors.Wrap(err, "failed to serialize config to yaml")
}

kubeconfigSecret := kubeconfig.GenerateSecretWithOwner(*clusterRef, out, controllerOwnerRef)
secretData := make(map[string][]byte)
secretData[secret.KubeconfigDataName] = out

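// Write a second kubeconfig that references the token through a relative
// file path rather than embedding it; a client that mounts all the secret
// keys into one directory can then pick up rotated tokens without being
// restarted.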
config.AuthInfos = map[string]*api.AuthInfo{
userName: {
TokenFile: "./" + relativeTokenFileKey,
},
}
out, err = clientcmd.Write(*config)
if err != nil {
return errors.Wrap(err, "failed to serialize config to yaml")
}
secretData[relativeKubeconfigKey] = out
secretData[relativeTokenFileKey] = []byte(token)

kubeconfigSecret := generateSecretWithOwner(*clusterRef, secretData, controllerOwnerRef)
if err := s.scope.Client.Create(ctx, kubeconfigSecret); err != nil {
return errors.Wrap(err, "failed to create kubeconfig secret")
}
@@ -142,32 +164,49 @@ func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *eks.C

func (s *Service) updateCAPIKubeconfigSecret(ctx context.Context, configSecret *corev1.Secret, cluster *eks.Cluster) error {
s.scope.Debug("Updating EKS kubeconfigs for cluster", "cluster-name", s.scope.KubernetesClusterName())
controllerOwnerRef := *metav1.NewControllerRef(s.scope.ControlPlane, ekscontrolplanev1.GroupVersion.WithKind("AWSManagedControlPlane"))

data, ok := configSecret.Data[secret.KubeconfigDataName]
if !ok {
return errors.Errorf("missing key %q in secret data", secret.KubeconfigDataName)
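// Refuse to update a kubeconfig secret that does not carry the expected
// controller owner reference, guarding against clobbering a secret this
// controller did not create.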
if !util.HasOwnerRef(configSecret.OwnerReferences, controllerOwnerRef) {
return fmt.Errorf("EKS kubeconfig %s/%s missing expected AWSManagedControlPlane ownership", configSecret.Namespace, configSecret.Name)
}

config, err := clientcmd.Load(data)
clusterName := s.scope.KubernetesClusterName()
userName := s.getKubeConfigUserName(clusterName, false)
config, err := s.createBaseKubeConfig(cluster, userName)
if err != nil {
return errors.Wrap(err, "failed to convert kubeconfig Secret into a clientcmdapi.Config")
return fmt.Errorf("creating base kubeconfig: %w", err)
}
clusterConfig := config.DeepCopy()

token, err := s.generateToken()
if err != nil {
return fmt.Errorf("generating presigned token: %w", err)
}

userName := s.getKubeConfigUserName(*cluster.Name, false)
config.AuthInfos[userName].Token = token
clusterConfig.AuthInfos = map[string]*api.AuthInfo{
userName: {
Token: token,
},
}

out, err := clientcmd.Write(*config)
out, err := clientcmd.Write(*clusterConfig)
if err != nil {
return errors.Wrap(err, "failed to serialize config to yaml")
}

configSecret.Data[secret.KubeconfigDataName] = out

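// Regenerate the relative kubeconfig and its token file as well, keeping
// all three secret keys in sync.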
config.AuthInfos = map[string]*api.AuthInfo{
userName: {
TokenFile: "./" + relativeTokenFileKey,
},
}
out, err = clientcmd.Write(*config)
if err != nil {
return errors.Wrap(err, "failed to serialize config to yaml")
}
configSecret.Data[relativeKubeconfigKey] = out
configSecret.Data[relativeTokenFileKey] = []byte(token)

err = s.scope.Client.Update(ctx, configSecret)
if err != nil {
return fmt.Errorf("updating kubeconfig secret: %w", err)
@@ -283,3 +322,21 @@ func (s *Service) getKubeConfigUserName(clusterName string, isUser bool) string

return fmt.Sprintf("%s-capi-admin", clusterName)
}

// generateSecretWithOwner returns a Kubernetes secret for the given Cluster name, namespace, kubeconfig data, and ownerReference.
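// Unlike kubeconfig.GenerateSecretWithOwner from cluster-api, which accepts a
// single []byte stored under the "value" key, this variant takes a map of
// data so the relative kubeconfig and token file can be stored alongside the
// standard key. The secret keeps the `[cluster-name]-kubeconfig` name
// produced by secret.Name.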
func generateSecretWithOwner(clusterName client.ObjectKey, data map[string][]byte, owner metav1.OwnerReference) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secret.Name(clusterName.Name, secret.Kubeconfig),
Namespace: clusterName.Namespace,
Labels: map[string]string{
clusterv1.ClusterNameLabel: clusterName.Name,
},
OwnerReferences: []metav1.OwnerReference{
owner,
},
},
Data: data,
Type: clusterv1.ClusterSecretType,
}
}
