add vmc logic
Signed-off-by: Yassine TIJANI <ytijani@vmware.com>
yastij committed Aug 8, 2019
1 parent a8080af commit 54bc5b3
Showing 3 changed files with 194 additions and 102 deletions.
216 changes: 181 additions & 35 deletions pkg/cloud/vsphere/actuators/cluster/actuator.go
@@ -21,6 +21,7 @@ import (
"fmt"
"net/url"
"strconv"
"strings"

"github.com/pkg/errors"

@@ -29,6 +30,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/klog/klogr"
elb "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/services/aws"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
clientv1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1"
clusterErr "sigs.k8s.io/cluster-api/pkg/controller/error"
@@ -41,6 +43,7 @@ import (
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/context"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/services/certificates"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/services/kubeclient"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/services/kubeconfig"
)

//+kubebuilder:rbac:groups=vsphere.cluster.k8s.io,resources=vsphereclusterproviderspecs;vsphereclusterproviderstatuses,verbs=get;list;watch;create;update;patch;delete
@@ -91,11 +94,22 @@ func (a *Actuator) Reconcile(cluster *clusterv1.Cluster) (opErr error) {
return err
}

if err := a.reconcileCloudConfigSecret(ctx); err != nil {
isVMwareCloud := ctx.ClusterConfig.VmwareCloud != nil
if isVMwareCloud {
if err = a.reconcileLoadBalancers(ctx); err != nil {
return err
}
}

if err := a.reconcileKubeConfig(ctx); err != nil {
return err
}

if err := a.reconcileReadyState(ctx); err != nil {
if err := a.reconcileReadyState(ctx, isVMwareCloud); err != nil {
return err
}

if err := a.reconcileCloudConfigSecret(ctx); err != nil {
return err
}
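The new branch is driven entirely by fields read from the cluster provider spec: ctx.ClusterConfig.VmwareCloud and its nested AwsProvider, which the load balancer code below consumes. A minimal sketch of what those types could look like follows; the Go type names, JSON tags, and the []string element type of Subnets are assumptions — only the field names come from the accesses in this diff.

```go
package v1alpha1

// VmwareCloudSpec sketches the provider-spec fields the VMC logic consumes.
// Field names mirror the accesses ctx.ClusterConfig.VmwareCloud.AwsProvider.{Region, VpcID, Subnets};
// everything else here is assumed.
type VmwareCloudSpec struct {
	// AwsProvider holds the AWS details used to front the control plane
	// with a load balancer when running on VMware Cloud on AWS.
	AwsProvider *AwsProviderSpec `json:"awsProvider,omitempty"`
}

// AwsProviderSpec describes where the load balancer is created.
type AwsProviderSpec struct {
	Region  string   `json:"region"`            // AWS region, e.g. "us-west-2"
	VpcID   string   `json:"vpcID"`             // VPC the load balancer lives in
	Subnets []string `json:"subnets,omitempty"` // subnets the load balancer attaches to
}
```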

Expand Down Expand Up @@ -130,6 +144,13 @@ func (a *Actuator) Delete(cluster *clusterv1.Cluster) (opErr error) {
return err
}

// Delete the load balancer if we are using VMC
if ctx.ClusterConfig.VmwareCloud != nil {
if err := a.deleteLoadBalancer(ctx); err != nil {
return err
}
}

return nil
}

@@ -140,7 +161,7 @@ func (a *Actuator) reconcilePKI(ctx *context.ClusterContext) error {
return nil
}

func (a *Actuator) reconcileReadyState(ctx *context.ClusterContext) error {
func (a *Actuator) reconcileReadyState(ctx *context.ClusterContext, isVMwareCloud bool) error {

// Always recalculate the API Endpoints.
ctx.Cluster.Status.APIEndpoints = []clusterv1.APIEndpoint{}
@@ -163,45 +184,45 @@ func (a *Actuator) reconcileReadyState(ctx *context.ClusterContext) error {
ctx.Logger.Error(err, "unable to list nodes for target cluster")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}
if isVMwareCloud {
// Get the RESTConfig in order to parse its Host to use as the control plane
// endpoint to add to the Cluster's API endpoints.
restConfig := client.RESTConfig()
if restConfig == nil {
ctx.Logger.Error(errors.New("restConfig == nil"), "error getting RESTConfig for kube client")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}

// Calculate the API endpoint for the cluster.
controlPlaneEndpointURL, err := url.Parse(restConfig.Host)
if err != nil {
return errors.Wrapf(err, "unable to parse cluster's restConfig host value: %v", restConfig.Host)
}

// The API endpoint may just have a host.
apiEndpoint := clusterv1.APIEndpoint{
Host: controlPlaneEndpointURL.Hostname(),
}

// Check to see if there is also a port.
if szPort := controlPlaneEndpointURL.Port(); szPort != "" {
port, err := strconv.Atoi(szPort)
if err != nil {
return errors.Wrapf(err, "unable to parse host and port for control plane endpoint %q for %q", controlPlaneEndpointURL.Host, ctx)
}
apiEndpoint.Port = port
}

// Update the API endpoints.
ctx.Cluster.Status.APIEndpoints = []clusterv1.APIEndpoint{apiEndpoint}
ctx.Logger.V(6).Info("calculated API endpoint for target cluster", "api-endpoint-host", apiEndpoint.Host, "api-endpoint-port", apiEndpoint.Port)

// Update the kubeadm control plane endpoint with the one from the kubeconfig.
if ctx.ClusterConfig.ClusterConfiguration.ControlPlaneEndpoint != controlPlaneEndpointURL.Host {
ctx.ClusterConfig.ClusterConfiguration.ControlPlaneEndpoint = controlPlaneEndpointURL.Host
ctx.Logger.V(6).Info("stored control plane endpoint in kubeadm cluster config", "control-plane-endpoint", controlPlaneEndpointURL.Host)
}
}

// Update the ready status.
ctx.ClusterStatus.Ready = true
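The block above derives the cluster's API endpoint from the kube client's REST config host by splitting out an optional port. A self-contained sketch of that parsing step follows; the sample host value is made up.

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// apiEndpointFromHost parses a rest.Config-style host such as
// "https://10.0.0.10:6443" into a hostname and an optional port,
// mirroring the url.Parse / strconv.Atoi steps in reconcileReadyState.
func apiEndpointFromHost(host string) (string, int, error) {
	u, err := url.Parse(host)
	if err != nil {
		return "", 0, err
	}
	port := 0
	if szPort := u.Port(); szPort != "" {
		if port, err = strconv.Atoi(szPort); err != nil {
			return "", 0, err
		}
	}
	return u.Hostname(), port, nil
}

func main() {
	host, port, err := apiEndpointFromHost("https://10.0.0.10:6443")
	if err != nil {
		panic(err)
	}
	fmt.Println(host, port) // 10.0.0.10 6443
}
```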

@@ -262,3 +283,128 @@ func (a *Actuator) deleteControlPlaneConfigMap(ctx *context.ClusterContext) error {
}
return nil
}

func (a *Actuator) deleteLoadBalancer(ctx *context.ClusterContext) error {
elbSvc := elb.New(ctx.ClusterConfig.VmwareCloud.AwsProvider.Region)
clusterName := ctx.ClusterName()

if err := elbSvc.Delete(clusterName); err != nil {
ctx.Logger.Error(err, "cannot delete load balancers")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}
return nil
}
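deleteLoadBalancer above and reconcileLoadBalancers below use the new elb package (imported from pkg/cloud/vsphere/services/aws) through three calls: New(region), Reconcile(vpcID, controlPlaneIPs, clusterName, subnets), and Delete(clusterName). The sketch below captures the surface those call sites imply; the interface name, the parameter types, and the constructor's return type are assumptions rather than the package's actual definitions.

```go
package aws

// Service sketches the load-balancer operations the cluster actuator relies on.
// The real package may expose a concrete struct instead of an interface, and
// the subnets element type is assumed to be string.
type Service interface {
	// Reconcile ensures a load balancer exists for clusterName in vpcID,
	// targeting the given control plane IPs, and returns its DNS name and port.
	Reconcile(vpcID string, controlPlaneIPs []string, clusterName string, subnets []string) (dnsName string, port int, err error)
	// Delete removes the load balancer previously created for clusterName.
	Delete(clusterName string) error
}

// New returns a Service scoped to the given AWS region. A real implementation
// would construct an AWS session and ELB client here; this stub only documents
// the dependency's shape.
func New(region string) Service {
	return nil
}
```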

func (a *Actuator) reconcileLoadBalancers(ctx *context.ClusterContext) error {

ctx.Logger.V(2).Info("Reconciling load balancers")

clusterName := ctx.ClusterName()
controlPlaneMachines, err := ctx.GetControlPlaneMachines()
if err != nil {
ctx.Logger.Error(err, "error getting control plane machines")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}
controlPlaneIPs := []string{}
for _, controlPlaneMachine := range controlPlaneMachines {
for _, nodeAddress := range controlPlaneMachine.Status.Addresses {
if nodeAddress.Type == apiv1.NodeExternalIP || nodeAddress.Type == apiv1.NodeInternalIP {
controlPlaneIPs = append(controlPlaneIPs, nodeAddress.Address)
break
}
}
}

awsProviderInfo := ctx.ClusterConfig.VmwareCloud.AwsProvider
elbSvc := elb.New(awsProviderInfo.Region)
vpcID := awsProviderInfo.VpcID
subnets := awsProviderInfo.Subnets
loadBalancerDNS, loadBalancerPort, err := elbSvc.Reconcile(vpcID, controlPlaneIPs, clusterName, subnets)
if err != nil {
ctx.Logger.Error(err, "cannot reconcile load balancer")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}

apiEndpoint := clusterv1.APIEndpoint{
Host: loadBalancerDNS,
Port: loadBalancerPort,
}

// Update the API endpoints.
ctx.Cluster.Status.APIEndpoints = []clusterv1.APIEndpoint{apiEndpoint}
ctx.Logger.V(6).Info("calculated API endpoint for target cluster", "api-endpoint-host", apiEndpoint.Host, "api-endpoint-port", apiEndpoint.Port)
controlPlaneEndpointURL := strings.Join([]string{apiEndpoint.Host, strconv.Itoa(apiEndpoint.Port)}, ":")
// Update the kubeadm control plane endpoint with the load balancer address.
if ctx.ClusterConfig.ClusterConfiguration.ControlPlaneEndpoint != controlPlaneEndpointURL {
ctx.ClusterConfig.ClusterConfiguration.ControlPlaneEndpoint = controlPlaneEndpointURL
ctx.Logger.V(6).Info("stored control plane endpoint in kubeadm cluster config", "control-plane-endpoint", controlPlaneEndpointURL)
}

return nil

}
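reconcileLoadBalancers composes the kubeadm control plane endpoint by joining the load balancer DNS name and port with strings.Join. The standalone snippet below shows an equivalent using net.JoinHostPort, which behaves the same for DNS names and additionally brackets IPv6 literals; the host and port values are made up.

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	host := "example-cluster-elb.us-west-2.elb.amazonaws.com" // hypothetical LB DNS name
	port := 6443                                              // hypothetical LB port

	// Equivalent to strings.Join([]string{host, strconv.Itoa(port)}, ":"),
	// but also handles IPv6 literals by wrapping them in brackets.
	endpoint := net.JoinHostPort(host, strconv.Itoa(port))
	fmt.Println(endpoint) // example-cluster-elb.us-west-2.elb.amazonaws.com:6443
}
```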

// reconcileKubeConfig creates a secret on the management cluster with
// the kubeconfig for the target cluster.
func (a *Actuator) reconcileKubeConfig(ctx *context.ClusterContext) error {

// Get the control plane endpoint.
controlPlaneEndpoint, err := ctx.ControlPlaneEndpoint()
if err != nil {
ctx.Logger.Error(err, "requeueing until control plane endpoint is available")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}

// Create a new kubeconfig for the target cluster.
ctx.Logger.V(6).Info("generating kubeconfig secret", "controlPlaneEndpoint", controlPlaneEndpoint)
kubeConfig, err := kubeconfig.New(ctx.Cluster.Name, controlPlaneEndpoint, ctx.ClusterConfig.CAKeyPair)
if err != nil {
return errors.Wrapf(err, "error generating kubeconfig for %q", ctx)
}

// Define the kubeconfig secret for the target cluster.
secret := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: ctx.Cluster.Namespace,
Name: remotev1.KubeConfigSecretName(ctx.Cluster.Name),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: ctx.Cluster.APIVersion,
Kind: ctx.Cluster.Kind,
Name: ctx.Cluster.Name,
UID: ctx.Cluster.UID,
},
},
},
StringData: map[string]string{
"value": kubeConfig,
},
}
ctx.Logger.V(6).Info("computed kubeconfig", "kubeconfig", kubeConfig)
if existingSecret, err := a.coreClient.Secrets(ctx.Cluster.Namespace).Get(secret.Name, metav1.GetOptions{}); err != nil {
if apierrors.IsNotFound(err) {
if _, err := a.coreClient.Secrets(ctx.Cluster.Namespace).Create(secret); err != nil {
if !apierrors.IsAlreadyExists(err) {
ctx.Logger.Error(err, "error creating kubeconfig secret")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}
ctx.Logger.V(6).Info("kubeconfig secret already exists")
} else {
ctx.Logger.V(4).Info("created kubeconfig secret")
}
} else {
ctx.Logger.Error(err, "cannot get the kubeconfig secret")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}
} else {
secret.ResourceVersion = existingSecret.ResourceVersion
updatedKubeConfig, err := a.coreClient.Secrets(ctx.Cluster.Namespace).Update(secret)
if err != nil {
ctx.Logger.Error(err, "cannot update the existing kubeconfig")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}
ctx.Logger.V(6).Info("updated kubeconfig", "kubeconfig", updatedKubeConfig)
}
return nil
}
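The get/create/update branching added to reconcileKubeConfig can also be read as a small create-or-update helper. The sketch below restates that flow using the same context-free client-go signatures seen elsewhere in this file; the helper name and its extraction into a function are hypothetical.

```go
package cluster

import (
	apiv1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

// createOrUpdateSecret mirrors the branching in reconcileKubeConfig: create the
// secret if it does not exist, tolerate a concurrent create, and otherwise
// update it while carrying over the live object's resourceVersion.
func createOrUpdateSecret(client corev1.SecretsGetter, secret *apiv1.Secret) error {
	existing, err := client.Secrets(secret.Namespace).Get(secret.Name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, createErr := client.Secrets(secret.Namespace).Create(secret)
		if apierrors.IsAlreadyExists(createErr) {
			return nil // another reconciliation created it first
		}
		return createErr
	}
	if err != nil {
		return err
	}
	secret.ResourceVersion = existing.ResourceVersion
	_, err = client.Secrets(secret.Namespace).Update(secret)
	return err
}
```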
73 changes: 10 additions & 63 deletions pkg/cloud/vsphere/actuators/machine/actuator.go
@@ -31,7 +31,6 @@ import (
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
clientv1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1"
clusterErr "sigs.k8s.io/cluster-api/pkg/controller/error"
remotev1 "sigs.k8s.io/cluster-api/pkg/controller/remote"
controllerClient "sigs.k8s.io/controller-runtime/pkg/client"

"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/actuators"
@@ -40,7 +39,6 @@ import (
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/context"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/services/govmomi"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/services/kubeclient"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/services/kubeconfig"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/tokens"
)

@@ -107,8 +105,8 @@ func (a *Actuator) Create(
if err := a.reconcilePKI(ctx); err != nil {
return err
}

return a.doInitOrJoin(ctx)
dependsOnEndpoint := ctx.ClusterConfig.VmwareCloud != nil
return a.doInitOrJoin(ctx, dependsOnEndpoint)
}

// Delete removes a machine.
Expand Down Expand Up @@ -177,10 +175,6 @@ func (a *Actuator) Update(
return err
}

if err := a.reconcileKubeConfig(ctx); err != nil {
return err
}

if err := a.reconcileReadyState(ctx); err != nil {
return err
}
Expand Down Expand Up @@ -229,7 +223,14 @@ func (a *Actuator) reconcilePKI(ctx *context.MachineContext) error {
return nil
}

func (a *Actuator) doInitOrJoin(ctx *context.MachineContext) error {
func (a *Actuator) doInitOrJoin(ctx *context.MachineContext, dependsOnEndpoint bool) error {
if dependsOnEndpoint {
_, err := ctx.ControlPlaneEndpoint()
if err != nil {
ctx.Logger.Error(err, "control plane endpoint not set")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}
}

// Determine whether or not to initialize the control plane, join an
// existing control plane, or join the cluster as a worker node.
@@ -264,60 +265,6 @@ func (a *Actuator) doInitOrJoin(ctx *context.MachineContext) error {
return govmomi.Create(ctx, token)
}

// reconcileKubeConfig creates a secret on the management cluster with
// the kubeconfig for target cluster.
func (a *Actuator) reconcileKubeConfig(ctx *context.MachineContext) error {
if !ctx.HasControlPlaneRole() {
return nil
}

// Get the control plane endpoint.
controlPlaneEndpoint, err := ctx.ControlPlaneEndpoint()
if err != nil {
ctx.Logger.Error(err, "requeueing until control plane endpoint is available")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}

// Create a new kubeconfig for the target cluster.
ctx.Logger.V(6).Info("generating kubeconfig secret")
kubeConfig, err := kubeconfig.New(ctx.Cluster.Name, controlPlaneEndpoint, ctx.ClusterConfig.CAKeyPair)
if err != nil {
return errors.Wrapf(err, "error generating kubeconfig for %q", ctx)
}

// Define the kubeconfig secret for the target cluster.
secret := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: ctx.Cluster.Namespace,
Name: remotev1.KubeConfigSecretName(ctx.Cluster.Name),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: ctx.Cluster.APIVersion,
Kind: ctx.Cluster.Kind,
Name: ctx.Cluster.Name,
UID: ctx.Cluster.UID,
},
},
},
StringData: map[string]string{
"value": kubeConfig,
},
}

// Create the kubeconfig secret.
if _, err := a.coreClient.Secrets(ctx.Cluster.Namespace).Create(secret); err != nil {
if !apierrors.IsAlreadyExists(err) {
ctx.Logger.Error(err, "error creating kubeconfig secret")
return &clusterErr.RequeueAfterError{RequeueAfter: config.DefaultRequeue}
}
ctx.Logger.V(6).Info("kubeconfig secret already exists")
} else {
ctx.Logger.V(4).Info("created kubeconfig secret")
}

return nil
}

// reconcileReadyState returns a requeue error until the machine appears
// in the target cluster's list of nodes.
func (a *Actuator) reconcileReadyState(ctx *context.MachineContext) error {
