
Commit

Address review comments
thunderboltsid committed Jul 25, 2024
1 parent 447cd6a commit ab38e14
Showing 8 changed files with 56 additions and 54 deletions.
6 changes: 4 additions & 2 deletions api/v1beta1/conditions.go
@@ -42,9 +42,11 @@ const (

 const (
 	// PrismCentralClientCondition indicates the status of the client used to connect to Prism Central
-	PrismCentralClientCondition capiv1.ConditionType = "PrismClientInit"
+	PrismCentralClientCondition   capiv1.ConditionType = "PrismClientInit"
+	PrismCentralV4ClientCondition capiv1.ConditionType = "PrismClientV4Init"

-	PrismCentralClientInitializationFailed = "PrismClientInitFailed"
+	PrismCentralClientInitializationFailed   = "PrismClientInitFailed"
+	PrismCentralV4ClientInitializationFailed = "PrismClientV4InitFailed"
 )

 const (
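The V4-specific condition type gives consumers a way to gate v4-only behavior separately from the v3 client. A minimal sketch (not part of this commit) using cluster-api's conditions helpers, which this codebase already imports as conditions; detachVolumeGroups is a hypothetical placeholder:

// Hypothetical consumer: only attempt v4-only work (e.g. volume-group
// detach) once the v4 client condition has been marked true.
if conditions.IsTrue(cluster, infrav1.PrismCentralV4ClientCondition) {
	detachVolumeGroups(cluster) // placeholder for v4-only cleanup
}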
28 changes: 14 additions & 14 deletions controllers/helpers.go
@@ -54,6 +54,8 @@ const (
 	subnetTypeOverlay = "OVERLAY"

 	gpuUnused = "UNUSED"
+
+	pollingInterval = time.Second * 2
 )

 // DeleteVM deletes a VM and is invoked by the NutanixMachineReconciler
@@ -765,7 +767,7 @@ func GetFailureDomain(failureDomainName string, nutanixCluster *infrav1.NutanixC
 	return nil, fmt.Errorf("failed to find failure domain %s on nutanix cluster object", failureDomainName)
 }

-func getPrismCentralV3ClientForCluster(ctx context.Context, cluster *infrav1.NutanixCluster, secretInformer v1.SecretInformer, mapInformer v1.ConfigMapInformer) (*prismclientv3.Client, error) {
+func getPrismCentralClientForCluster(ctx context.Context, cluster *infrav1.NutanixCluster, secretInformer v1.SecretInformer, mapInformer v1.ConfigMapInformer) (*prismclientv3.Client, error) {
 	log := ctrl.LoggerFrom(ctx)

 	clientHelper := nutanixclient.NewHelper(secretInformer, mapInformer)
@@ -776,18 +778,18 @@ func getPrismCentralV3ClientForCluster(ctx context.Context, cluster *infrav1.Nut
 		return nil, err
 	}

-	client, err := nutanixclient.NutanixClientCacheV3.GetOrCreate(&nutanixclient.CacheParams{
+	v3Client, err := nutanixclient.NutanixClientCache.GetOrCreate(&nutanixclient.CacheParams{
 		NutanixCluster:          cluster,
 		PrismManagementEndpoint: managementEndpoint,
 	})
 	if err != nil {
-		log.Error(err, "error occurred while getting nutanix prism client from cache")
+		log.Error(err, "error occurred while getting nutanix prism v3 Client from cache")
 		conditions.MarkFalse(cluster, infrav1.PrismCentralClientCondition, infrav1.PrismCentralClientInitializationFailed, capiv1.ConditionSeverityError, err.Error())
-		return nil, fmt.Errorf("nutanix prism client error: %w", err)
+		return nil, fmt.Errorf("nutanix prism v3 Client error: %w", err)
 	}

 	conditions.MarkTrue(cluster, infrav1.PrismCentralClientCondition)
-	return client, nil
+	return v3Client, nil
 }

 func getPrismCentralV4ClientForCluster(ctx context.Context, cluster *infrav1.NutanixCluster, secretInformer v1.SecretInformer, mapInformer v1.ConfigMapInformer) (*prismclientv4.Client, error) {
@@ -797,7 +799,7 @@ func getPrismCentralV4ClientForCluster(ctx context.Context, cluster *infrav1.Nut
 	managementEndpoint, err := clientHelper.BuildManagementEndpoint(ctx, cluster)
 	if err != nil {
 		log.Error(err, fmt.Sprintf("error occurred while getting management endpoint for cluster %q", cluster.GetNamespacedName()))
-		conditions.MarkFalse(cluster, infrav1.PrismCentralClientCondition, infrav1.PrismCentralClientInitializationFailed, capiv1.ConditionSeverityError, err.Error())
+		conditions.MarkFalse(cluster, infrav1.PrismCentralV4ClientCondition, infrav1.PrismCentralV4ClientInitializationFailed, capiv1.ConditionSeverityError, err.Error())
 		return nil, err
 	}

@@ -806,12 +808,12 @@ func getPrismCentralV4ClientForCluster(ctx context.Context, cluster *infrav1.Nut
 		PrismManagementEndpoint: managementEndpoint,
 	})
 	if err != nil {
-		log.Error(err, "error occurred while getting nutanix prism client from cache")
-		conditions.MarkFalse(cluster, infrav1.PrismCentralClientCondition, infrav1.PrismCentralClientInitializationFailed, capiv1.ConditionSeverityError, err.Error())
-		return nil, fmt.Errorf("nutanix prism client error: %w", err)
+		log.Error(err, "error occurred while getting nutanix prism v4 client from cache")
+		conditions.MarkFalse(cluster, infrav1.PrismCentralV4ClientCondition, infrav1.PrismCentralV4ClientInitializationFailed, capiv1.ConditionSeverityError, err.Error())
+		return nil, fmt.Errorf("nutanix prism v4 client error: %w", err)
 	}

-	conditions.MarkTrue(cluster, infrav1.PrismCentralClientCondition)
+	conditions.MarkTrue(cluster, infrav1.PrismCentralV4ClientCondition)
 	return client, nil
 }

@@ -829,7 +831,7 @@ func isPrismCentralV4Compatible(ctx context.Context, v3Client *prismclientv3.Cli
 	// We can check if the version is greater than or equal to 2024
 	if pcVersion == "" {
-		return false, errors.New("version is empty")
+		return false, errors.New("prism central version is empty")
 	}

 	for _, internalPCName := range internalPCNames {
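For illustration, a self-contained sketch of the year-based check the comment above describes. The exact parsing in the repository is not shown in this diff, and the "pc." version prefix is an assumption:

package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

// isV4CompatibleVersion treats Prism Central as v4-API-capable when its
// version year is 2024 or later. Version strings are assumed to look like
// "pc.2024.1"; internal build names would need the extra handling the real
// function applies via internalPCNames.
func isV4CompatibleVersion(pcVersion string) (bool, error) {
	if pcVersion == "" {
		return false, errors.New("prism central version is empty")
	}
	year, err := strconv.Atoi(strings.Split(strings.TrimPrefix(pcVersion, "pc."), ".")[0])
	if err != nil {
		return false, fmt.Errorf("unexpected version format %q: %w", pcVersion, err)
	}
	return year >= 2024, nil
}

func main() {
	fmt.Println(isV4CompatibleVersion("pc.2024.1")) // true <nil>
}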
@@ -899,8 +901,6 @@ func detachVolumeGroupsFromVM(ctx context.Context, v4Client *prismclientv4.Clien
 		volumeGroupsToDetach = append(volumeGroupsToDetach, *backingInfo.VolumeGroupExtId)
 	}

-	log.Info(fmt.Sprintf("detaching %d volume groups from virtual machine %s", len(volumeGroupsToDetach), vmUUID))
-
 	// Detach the volume groups from the virtual machine
 	for _, volumeGroup := range volumeGroupsToDetach {
 		log.Info(fmt.Sprintf("detaching volume group %s from virtual machine %s", volumeGroup, vmUUID))
@@ -935,7 +935,7 @@ func waitForTaskCompletionV4(ctx context.Context, v4Client *prismclientv4.Client

 	if err := wait.PollUntilContextCancel(
 		ctx,
-		100*time.Millisecond,
+		pollingInterval,
 		true,
 		func(ctx context.Context) (done bool, err error) {
 			task, err := v4Client.TasksApiInstance.GetTaskById(utils.StringPtr(taskID))
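The last hunk slows task polling from 100 ms to the new shared pollingInterval of two seconds. A runnable sketch of the same wait.PollUntilContextCancel pattern, with the v4 task lookup replaced by a stub since those API details are not part of this diff:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

const pollingInterval = time.Second * 2

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Stand-in for polling v4Client.TasksApiInstance.GetTaskById: report done
	// on the third poll. Passing true makes the first poll happen immediately.
	attempts := 0
	err := wait.PollUntilContextCancel(ctx, pollingInterval, true,
		func(ctx context.Context) (bool, error) {
			attempts++
			return attempts >= 3, nil
		})
	fmt.Printf("finished after %d attempts, err=%v\n", attempts, err)
}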
12 changes: 6 additions & 6 deletions controllers/helpers_test.go
@@ -157,7 +157,7 @@ func TestGetPrismCentralClientForCluster(t *testing.T) {
 		mapInformer := mockk8sclient.NewMockConfigMapInformer(ctrl)
 		secretInformer.EXPECT().Lister().Return(secretLister)

-		_, err := getPrismCentralV3ClientForCluster(ctx, cluster, secretInformer, mapInformer)
+		_, err := getPrismCentralClientForCluster(ctx, cluster, secretInformer, mapInformer)
 		assert.Error(t, err)
 	})

@@ -187,20 +187,20 @@ func TestGetPrismCentralClientForCluster(t *testing.T) {
 		mapInformer := mockk8sclient.NewMockConfigMapInformer(ctrl)
 		secretInformer.EXPECT().Lister().Return(secretLister)

-		_, err = getPrismCentralV3ClientForCluster(ctx, cluster, secretInformer, mapInformer)
+		_, err = getPrismCentralClientForCluster(ctx, cluster, secretInformer, mapInformer)
 		assert.Error(t, err)
 	})

 	t.Run("GetOrCreate succeeds", func(t *testing.T) {
 		ctrl := gomock.NewController(t)

-		oldNutanixClientCache := nutanixclient.NutanixClientCacheV3
+		oldNutanixClientCache := nutanixclient.NutanixClientCache
 		defer func() {
-			nutanixclient.NutanixClientCacheV3 = oldNutanixClientCache
+			nutanixclient.NutanixClientCache = oldNutanixClientCache
 		}()

 		// Create a new client cache with session auth disabled to avoid network calls in tests
-		nutanixclient.NutanixClientCacheV3 = prismclientv3.NewClientCache()
+		nutanixclient.NutanixClientCache = prismclientv3.NewClientCache()

 		creds := []credentialtypes.Credential{
 			{
@@ -225,7 +225,7 @@ func TestGetPrismCentralClientForCluster(t *testing.T) {
 		mapInformer := mockk8sclient.NewMockConfigMapInformer(ctrl)
 		secretInformer.EXPECT().Lister().Return(secretLister)

-		_, err = getPrismCentralV3ClientForCluster(ctx, cluster, secretInformer, mapInformer)
+		_, err = getPrismCentralClientForCluster(ctx, cluster, secretInformer, mapInformer)
 		assert.NoError(t, err)
 	})
 }
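Note the save-and-restore pattern in the GetOrCreate succeeds case: the package-level cache is stashed, swapped for a fresh cache built without session auth so the test makes no network calls, and restored in a defer, which keeps the test hermetic even if it fails midway.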
16 changes: 8 additions & 8 deletions controllers/nutanixcluster_controller.go
@@ -189,17 +189,17 @@ func (r *NutanixClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 		return reconcile.Result{}, err
 	}

-	v3Client, err := getPrismCentralV3ClientForCluster(ctx, cluster, r.SecretInformer, r.ConfigMapInformer)
+	v3Client, err := getPrismCentralClientForCluster(ctx, cluster, r.SecretInformer, r.ConfigMapInformer)
 	if err != nil {
 		log.Error(err, "error occurred while fetching prism central client")
 		return reconcile.Result{}, err
 	}

 	rctx := &nctx.ClusterContext{
-		Context:         ctx,
-		Cluster:         capiCluster,
-		NutanixCluster:  cluster,
-		NutanixClientV3: v3Client,
+		Context:        ctx,
+		Cluster:        capiCluster,
+		NutanixCluster: cluster,
+		NutanixClient:  v3Client,
 	}

 	createV4Client, err := isPrismCentralV4Compatible(ctx, v3Client)
@@ -251,7 +251,7 @@ func (r *NutanixClusterReconciler) reconcileDelete(rctx *nctx.ClusterContext) (r

 	// delete the client from the cache
 	log.Info(fmt.Sprintf("deleting nutanix prism client for cluster %s from cache", rctx.NutanixCluster.GetNamespacedName()))
-	nutanixclient.NutanixClientCacheV3.Delete(&nutanixclient.CacheParams{NutanixCluster: rctx.NutanixCluster})
+	nutanixclient.NutanixClientCache.Delete(&nutanixclient.CacheParams{NutanixCluster: rctx.NutanixCluster})

 	if err := r.reconcileCredentialRefDelete(rctx.Context, rctx.NutanixCluster); err != nil {
 		log.Error(err, fmt.Sprintf("error occurred while reconciling credential ref deletion for cluster %s", rctx.Cluster.Name))
@@ -334,7 +334,7 @@ func (r *NutanixClusterReconciler) reconcileCategories(rctx *nctx.ClusterContext
 	log := ctrl.LoggerFrom(rctx.Context)
 	log.Info("Reconciling categories for cluster")
 	defaultCategories := GetDefaultCAPICategoryIdentifiers(rctx.Cluster.Name)
-	_, err := GetOrCreateCategories(rctx.Context, rctx.NutanixClientV3, defaultCategories)
+	_, err := GetOrCreateCategories(rctx.Context, rctx.NutanixClient, defaultCategories)
 	if err != nil {
 		conditions.MarkFalse(rctx.NutanixCluster, infrav1.ClusterCategoryCreatedCondition, infrav1.ClusterCategoryCreationFailed, capiv1.ConditionSeverityError, err.Error())
 		return err
@@ -350,7 +350,7 @@ func (r *NutanixClusterReconciler) reconcileCategoriesDelete(rctx *nctx.ClusterC
 		conditions.GetReason(rctx.NutanixCluster, infrav1.ClusterCategoryCreatedCondition) == infrav1.DeletionFailed {
 		defaultCategories := GetDefaultCAPICategoryIdentifiers(rctx.Cluster.Name)
 		obsoleteCategories := GetObsoleteDefaultCAPICategoryIdentifiers(rctx.Cluster.Name)
-		err := DeleteCategories(rctx.Context, rctx.NutanixClientV3, defaultCategories, obsoleteCategories)
+		err := DeleteCategories(rctx.Context, rctx.NutanixClient, defaultCategories, obsoleteCategories)
 		if err != nil {
 			conditions.MarkFalse(rctx.NutanixCluster, infrav1.ClusterCategoryCreatedCondition, infrav1.DeletionFailed, capiv1.ConditionSeverityWarning, err.Error())
 			return err
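Putting the renamed pieces together, the cluster reconciler's wiring now reads roughly as below. This is a condensed sketch: the lines following createV4Client are collapsed in this view, so the v4 error handling shown here is an assumption.

v3Client, err := getPrismCentralClientForCluster(ctx, cluster, r.SecretInformer, r.ConfigMapInformer)
if err != nil {
	return reconcile.Result{}, err
}

rctx := &nctx.ClusterContext{
	Context:        ctx,
	Cluster:        capiCluster,
	NutanixCluster: cluster,
	NutanixClient:  v3Client,
}

// Only construct the v4 client when Prism Central reports a compatible version.
createV4Client, err := isPrismCentralV4Compatible(ctx, v3Client)
if err == nil && createV4Client {
	v4Client, v4Err := getPrismCentralV4ClientForCluster(ctx, cluster, r.SecretInformer, r.ConfigMapInformer)
	if v4Err != nil {
		return reconcile.Result{}, v4Err
	}
	rctx.NutanixClientV4 = v4Client
}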
36 changes: 18 additions & 18 deletions controllers/nutanixmachine_controller.go
@@ -260,19 +260,19 @@ func (r *NutanixMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 		return ctrl.Result{Requeue: true}, nil
 	}

-	v3Client, err := getPrismCentralV3ClientForCluster(ctx, ntxCluster, r.SecretInformer, r.ConfigMapInformer)
+	v3Client, err := getPrismCentralClientForCluster(ctx, ntxCluster, r.SecretInformer, r.ConfigMapInformer)
 	if err != nil {
 		log.Error(err, "error occurred while fetching prism central client")
 		return reconcile.Result{}, err
 	}

 	rctx := &nctx.MachineContext{
-		Context:         ctx,
-		Cluster:         cluster,
-		Machine:         machine,
-		NutanixCluster:  ntxCluster,
-		NutanixMachine:  ntxMachine,
-		NutanixClientV3: v3Client,
+		Context:        ctx,
+		Cluster:        cluster,
+		Machine:        machine,
+		NutanixCluster: ntxCluster,
+		NutanixMachine: ntxMachine,
+		NutanixClient:  v3Client,
 	}

 	createV4Client, err := isPrismCentralV4Compatible(ctx, v3Client)
@@ -317,7 +317,7 @@ func (r *NutanixMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 func (r *NutanixMachineReconciler) reconcileDelete(rctx *nctx.MachineContext) (reconcile.Result, error) {
 	ctx := rctx.Context
 	log := ctrl.LoggerFrom(ctx)
-	v3Client := rctx.NutanixClientV3
+	v3Client := rctx.NutanixClient
 	vmName := rctx.Machine.Name
 	log.Info(fmt.Sprintf("Handling deletion of VM: %s", vmName))
 	conditions.MarkFalse(rctx.NutanixMachine, infrav1.VMProvisionedCondition, capiv1.DeletingReason, capiv1.ConditionSeverityInfo, "")
@@ -362,7 +362,7 @@ func (r *NutanixMachineReconciler) reconcileDelete(rctx *nctx.MachineContext) (r
 	}
 	if lastTaskUUID != "" {
 		log.Info(fmt.Sprintf("checking if VM %s with UUID %s has in progress tasks", vmName, vmUUID))
-		taskInProgress, err := HasTaskInProgress(ctx, rctx.NutanixClientV3, lastTaskUUID)
+		taskInProgress, err := HasTaskInProgress(ctx, rctx.NutanixClient, lastTaskUUID)
 		if err != nil {
 			log.Error(err, fmt.Sprintf("error occurred while checking task %s for VM %s. Trying to delete VM", lastTaskUUID, vmName))
 		}
@@ -541,7 +541,7 @@ func (r *NutanixMachineReconciler) getOrCreateVM(rctx *nctx.MachineContext) (*pr
 	ctx := rctx.Context
 	log := ctrl.LoggerFrom(ctx)
 	vmName := rctx.Machine.Name
-	v3Client := rctx.NutanixClientV3
+	v3Client := rctx.NutanixClient

 	// Check if the VM already exists
 	vm, err = FindVM(ctx, v3Client, rctx.NutanixMachine, vmName)
@@ -755,7 +755,7 @@ func getDiskList(rctx *nctx.MachineContext) ([]*prismclientv3.VMDisk, error) {

 func getSystemDisk(rctx *nctx.MachineContext) (*prismclientv3.VMDisk, error) {
 	nodeOSImageName := rctx.NutanixMachine.Spec.Image.Name
-	nodeOSImageUUID, err := GetImageUUID(rctx.Context, rctx.NutanixClientV3, nodeOSImageName, rctx.NutanixMachine.Spec.Image.UUID)
+	nodeOSImageUUID, err := GetImageUUID(rctx.Context, rctx.NutanixClient, nodeOSImageName, rctx.NutanixMachine.Spec.Image.UUID)
 	if err != nil {
 		errorMsg := fmt.Errorf("failed to get the image UUID for image named %q: %w", *nodeOSImageName, err)
 		rctx.SetFailureStatus(capierrors.CreateMachineError, errorMsg)
@@ -775,7 +775,7 @@ func getSystemDisk(rctx *nctx.MachineContext) (*prismclientv3.VMDisk, error) {

 func getBootstrapDisk(rctx *nctx.MachineContext) (*prismclientv3.VMDisk, error) {
 	bootstrapImageName := rctx.NutanixMachine.Spec.BootstrapRef.Name
-	bootstrapImageUUID, err := GetImageUUID(rctx.Context, rctx.NutanixClientV3, &bootstrapImageName, nil)
+	bootstrapImageUUID, err := GetImageUUID(rctx.Context, rctx.NutanixClient, &bootstrapImageName, nil)
 	if err != nil {
 		errorMsg := fmt.Errorf("failed to get the image UUID for image named %q: %w", bootstrapImageName, err)
 		rctx.SetFailureStatus(capierrors.CreateMachineError, errorMsg)
@@ -872,7 +872,7 @@ func (r *NutanixMachineReconciler) getMachineCategoryIdentifiers(rctx *nctx.Mach
 	categoryIdentifiers := GetDefaultCAPICategoryIdentifiers(rctx.Cluster.Name)
 	// Only try to create default categories. ignoring error so that we can return all including
 	// additionalCategories as well
-	_, err := GetOrCreateCategories(rctx.Context, rctx.NutanixClientV3, categoryIdentifiers)
+	_, err := GetOrCreateCategories(rctx.Context, rctx.NutanixClient, categoryIdentifiers)
 	if err != nil {
 		log.Error(err, "Failed to getOrCreateCategories")
 	}
@@ -925,7 +925,7 @@ func (r *NutanixMachineReconciler) addVMToProject(rctx *nctx.MachineContext, vmM
 		return errorMsg
 	}

-	projectUUID, err := GetProjectUUID(rctx.Context, rctx.NutanixClientV3, projectRef.Name, projectRef.UUID)
+	projectUUID, err := GetProjectUUID(rctx.Context, rctx.NutanixClient, projectRef.Name, projectRef.UUID)
 	if err != nil {
 		errorMsg := fmt.Errorf("error occurred while searching for project for VM %s: %v", vmName, err)
 		log.Error(errorMsg, "error occurred while searching for project")
@@ -954,11 +954,11 @@ func (r *NutanixMachineReconciler) GetSubnetAndPEUUIDs(rctx *nctx.MachineContext
 		if len(rctx.NutanixMachine.Spec.Subnets) == 0 {
 			return "", nil, fmt.Errorf("subnets must be passed if failure domain is not configured")
 		}
-		peUUID, err := GetPEUUID(rctx.Context, rctx.NutanixClientV3, rctx.NutanixMachine.Spec.Cluster.Name, rctx.NutanixMachine.Spec.Cluster.UUID)
+		peUUID, err := GetPEUUID(rctx.Context, rctx.NutanixClient, rctx.NutanixMachine.Spec.Cluster.Name, rctx.NutanixMachine.Spec.Cluster.UUID)
 		if err != nil {
 			return "", nil, err
 		}
-		subnetUUIDs, err := GetSubnetUUIDList(rctx.Context, rctx.NutanixClientV3, rctx.NutanixMachine.Spec.Subnets, peUUID)
+		subnetUUIDs, err := GetSubnetUUIDList(rctx.Context, rctx.NutanixClient, rctx.NutanixMachine.Spec.Subnets, peUUID)
 		if err != nil {
 			return "", nil, err
 		}
@@ -972,11 +972,11 @@ func (r *NutanixMachineReconciler) GetSubnetAndPEUUIDs(rctx *nctx.MachineContext
 		if err != nil {
 			return "", nil, fmt.Errorf("failed to find failure domain %s", failureDomainName)
 		}
-		peUUID, err := GetPEUUID(rctx.Context, rctx.NutanixClientV3, failureDomain.Cluster.Name, failureDomain.Cluster.UUID)
+		peUUID, err := GetPEUUID(rctx.Context, rctx.NutanixClient, failureDomain.Cluster.Name, failureDomain.Cluster.UUID)
 		if err != nil {
 			return "", nil, fmt.Errorf("failed to find prism element uuid for failure domain %s", failureDomainName)
 		}
-		subnetUUIDs, err := GetSubnetUUIDList(rctx.Context, rctx.NutanixClientV3, failureDomain.Subnets, peUUID)
+		subnetUUIDs, err := GetSubnetUUIDList(rctx.Context, rctx.NutanixClient, failureDomain.Subnets, peUUID)
 		if err != nil {
 			return "", nil, fmt.Errorf("failed to find subnet uuids for failure domain %s", failureDomainName)
 		}
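Because every helper already receives its client through the shared MachineContext, the rename stays mechanical: GetPEUUID, GetSubnetUUIDList, GetImageUUID, GetProjectUUID, and GetOrCreateCategories keep their signatures, and only the field they are handed changes from rctx.NutanixClientV3 to rctx.NutanixClient.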
4 changes: 2 additions & 2 deletions pkg/client/cache.go
@@ -8,8 +8,8 @@ import (
 	"github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1"
 )

-// NutanixClientCacheV3 is the cache of prism clients to be shared across the different controllers
-var NutanixClientCacheV3 = v3.NewClientCache(v3.WithSessionAuth(true))
+// NutanixClientCache is the cache of prism clients to be shared across the different controllers
+var NutanixClientCache = v3.NewClientCache(v3.WithSessionAuth(true))

 // NutanixClientCacheV4 is the cache of prism clients to be shared across the different controllers
 var NutanixClientCacheV4 = v4.NewClientCache(v4.WithSessionAuth(true))
4 changes: 2 additions & 2 deletions pkg/client/cache_test.go
@@ -44,6 +44,6 @@ func TestCacheParamsManagementEndpoint(t *testing.T) {
 }

 func TestNutanixClientCache(t *testing.T) {
-	assert.NotNil(t, NutanixClientCacheV3)
-	assert.IsType(t, &v3.ClientCache{}, NutanixClientCacheV3)
+	assert.NotNil(t, NutanixClientCache)
+	assert.IsType(t, &v3.ClientCache{}, NutanixClientCache)
 }
4 changes: 2 additions & 2 deletions pkg/context/context.go
@@ -41,7 +41,7 @@ var (
 // ClusterContext is a context used with a NutanixCluster reconciler
 type ClusterContext struct {
 	Context         context.Context
-	NutanixClientV3 *prismclientv3.Client
+	NutanixClient   *prismclientv3.Client
 	NutanixClientV4 *prismclientv4.Client

 	Cluster        *capiv1.Cluster
@@ -51,7 +51,7 @@ type ClusterContext struct {
 // MachineContext is a context used with a NutanixMachine reconciler
 type MachineContext struct {
 	Context         context.Context
-	NutanixClientV3 *prismclientv3.Client
+	NutanixClient   *prismclientv3.Client
 	NutanixClientV4 *prismclientv4.Client

 	Cluster        *capiv1.Cluster
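Taken together, the renames across pkg/client and pkg/context settle on one convention: the unversioned names (NutanixClient, NutanixClientCache, getPrismCentralClientForCluster) refer to the default v3 client, while everything v4-specific stays explicitly suffixed (NutanixClientV4, NutanixClientCacheV4, getPrismCentralV4ClientForCluster, PrismCentralV4ClientCondition).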

0 comments on commit ab38e14
