diff --git a/simulator/cluster_compute_resource_test.go b/simulator/cluster_compute_resource_test.go index 9ff0ad119..915b3443e 100644 --- a/simulator/cluster_compute_resource_test.go +++ b/simulator/cluster_compute_resource_test.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -259,7 +259,7 @@ func TestPlaceVmRelocate(t *testing.T) { "InvalidArgument", }, { - "relocate with a diskId in spec.dick that does not exist in the vm", + "relocate with a diskId in spec.disk that does not exist in the vm", &types.VirtualMachineRelocateSpec{ Host: &hostMoRef, Disk: []types.VirtualMachineRelocateSpecDiskLocator{ diff --git a/simulator/folder.go b/simulator/folder.go index bd97e6b4c..dcf2dfa75 100644 --- a/simulator/folder.go +++ b/simulator/folder.go @@ -772,6 +772,274 @@ func (f *Folder) DestroyTask(ctx *Context, req *types.Destroy_Task) soap.HasFaul } } +func addPlacementFault(body *methods.PlaceVmsXClusterBody, vmName string, vmRef *types.ManagedObjectReference, poolRef types.ManagedObjectReference) { + faults := types.PlaceVmsXClusterResultPlacementFaults{ + VmName: vmName, + ResourcePool: poolRef, + Vm: vmRef, + Faults: []types.LocalizedMethodFault{ + { + Fault: &types.GenericDrsFault{}, + }, + }, + } + body.Res.Returnval.Faults = append(body.Res.Returnval.Faults, faults) +} + +func generateInitialPlacementAction(ctx *Context, inputConfigSpec *types.VirtualMachineConfigSpec, pool *ResourcePool, + cluster *ClusterComputeResource, hostRequired, datastoreRequired bool) *types.ClusterClusterInitialPlacementAction { + var configSpec *types.VirtualMachineConfigSpec + + placementAction := types.ClusterClusterInitialPlacementAction{ + Pool: pool.Self, + } + + if hostRequired { + randomHost := cluster.Host[rand.Intn(len(cluster.Host))] + placementAction.TargetHost = &randomHost + } + + if datastoreRequired { + configSpec = inputConfigSpec + + // TODO: This is just an initial implementation aimed at returning some data but it is not + // necessarily fully consistent, like we should ensure the host, if also required, has the + // datastore mounted. 
+ ds := ctx.Map.Get(cluster.Datastore[rand.Intn(len(cluster.Datastore))]).(*Datastore) + + if configSpec.Files == nil { + configSpec.Files = new(types.VirtualMachineFileInfo) + } + configSpec.Files.VmPathName = fmt.Sprintf("[%[1]s] %[2]s/%[2]s.vmx", ds.Name, inputConfigSpec.Name) + + for _, change := range configSpec.DeviceChange { + dspec := change.GetVirtualDeviceConfigSpec() + + if dspec.FileOperation != types.VirtualDeviceConfigSpecFileOperationCreate { + continue + } + + switch dspec.Operation { + case types.VirtualDeviceConfigSpecOperationAdd: + device := dspec.Device + d := device.GetVirtualDevice() + + switch device.(type) { + case *types.VirtualDisk: + switch b := d.Backing.(type) { + case types.BaseVirtualDeviceFileBackingInfo: + info := b.GetVirtualDeviceFileBackingInfo() + info.Datastore = types.NewReference(ds.Reference()) + + var dsPath object.DatastorePath + if dsPath.FromString(info.FileName) { + dsPath.Datastore = ds.Name + info.FileName = dsPath.String() + } + } + } + } + } + } + + placementAction.ConfigSpec = configSpec + return &placementAction +} + +func generateRecommendationForRelocate(ctx *Context, req *types.PlaceVmsXCluster) *methods.PlaceVmsXClusterBody { + + pools := req.PlacementSpec.ResourcePools + specs := req.PlacementSpec.VmPlacementSpecs + + body := new(methods.PlaceVmsXClusterBody) + body.Res = new(types.PlaceVmsXClusterResponse) + hostRequired := req.PlacementSpec.HostRecommRequired != nil && *req.PlacementSpec.HostRecommRequired + datastoreRequired := req.PlacementSpec.DatastoreRecommRequired != nil && *req.PlacementSpec.DatastoreRecommRequired + + for _, spec := range specs { + + // The RelocateSpec must be set. + if spec.RelocateSpec == nil { + body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "relocateSpec"}) + return body + } + + // The VM Reference must be set. 
+ if spec.Vm == nil { + body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "vm"}) + return body + } + + vmRef := ctx.Map.Get(*spec.Vm) + if vmRef == nil { + body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "vm"}) + return body + } + + vm := vmRef.(*VirtualMachine) + pool := ctx.Map.Get(pools[rand.Intn(len(pools))]).(*ResourcePool) + cluster := ctx.Map.Get(pool.Owner).(*ClusterComputeResource) + + if len(cluster.Host) == 0 { + addPlacementFault(body, spec.ConfigSpec.Name, &vm.Self, pool.Self) + continue + } + + reco := types.ClusterRecommendation{ + Key: "1", + Type: "V1", + Time: time.Now(), + Rating: 1, + Reason: string(types.RecommendationReasonCodeXClusterPlacement), + ReasonText: string(types.RecommendationReasonCodeXClusterPlacement), + Target: &cluster.Self, + } + + placementAction := generateInitialPlacementAction(ctx, &spec.ConfigSpec, pool, cluster, hostRequired, datastoreRequired) + + reco.Action = append(reco.Action, placementAction) + + body.Res.Returnval.PlacementInfos = append(body.Res.Returnval.PlacementInfos, + types.PlaceVmsXClusterResultPlacementInfo{ + VmName: vm.Name, + Recommendation: reco, + Vm: &vm.Self, + }, + ) + } + return body +} + +func generateRecommendationForReconfigure(ctx *Context, req *types.PlaceVmsXCluster) *methods.PlaceVmsXClusterBody { + + pools := req.PlacementSpec.ResourcePools + specs := req.PlacementSpec.VmPlacementSpecs + + body := new(methods.PlaceVmsXClusterBody) + body.Res = new(types.PlaceVmsXClusterResponse) + hostRequired := req.PlacementSpec.HostRecommRequired != nil && *req.PlacementSpec.HostRecommRequired + datastoreRequired := req.PlacementSpec.DatastoreRecommRequired != nil && *req.PlacementSpec.DatastoreRecommRequired + + for _, spec := range specs { + + // Only a single pool must be set + if len(pools) != 1 { + body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "resourcePools"}) + return body + } + + // The RelocateSpec must not be set. + if spec.RelocateSpec != nil { + body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "relocateSpec"}) + return body + } + + // The VM Reference must be set. 
+		if spec.Vm == nil {
+			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "vm"})
+			return body
+		}
+
+		vmRef := ctx.Map.Get(*spec.Vm)
+		if vmRef == nil {
+			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "vm"})
+			return body
+		}
+
+		vm := vmRef.(*VirtualMachine)
+
+		// Use the VM's current host.
+		host := ctx.Map.Get(vm.Runtime.Host.Reference()).(*HostSystem)
+
+		if host.Parent.Type != "ClusterComputeResource" {
+			addPlacementFault(body, spec.ConfigSpec.Name, &vm.Self, host.Self)
+			continue
+		}
+
+		cluster := ctx.Map.Get(*host.Parent).(*ClusterComputeResource)
+		pool := ctx.Map.Get(*cluster.ResourcePool).(*ResourcePool)
+
+		reco := types.ClusterRecommendation{
+			Key:        "1",
+			Type:       "V1",
+			Time:       time.Now(),
+			Rating:     1,
+			Reason:     string(types.RecommendationReasonCodeXClusterPlacement),
+			ReasonText: string(types.RecommendationReasonCodeXClusterPlacement),
+			Target:     &cluster.Self,
+		}
+
+		placementAction := generateInitialPlacementAction(ctx, &spec.ConfigSpec, pool, cluster, hostRequired, datastoreRequired)
+
+		reco.Action = append(reco.Action, placementAction)
+
+		body.Res.Returnval.PlacementInfos = append(body.Res.Returnval.PlacementInfos,
+			types.PlaceVmsXClusterResultPlacementInfo{
+				VmName:         vm.Name,
+				Recommendation: reco,
+				Vm:             &vm.Self,
+			},
+		)
+	}
+	return body
+}
+
+func generateRecommendationForCreateAndPowerOn(ctx *Context, req *types.PlaceVmsXCluster) *methods.PlaceVmsXClusterBody {
+
+	pools := req.PlacementSpec.ResourcePools
+	specs := req.PlacementSpec.VmPlacementSpecs
+
+	body := new(methods.PlaceVmsXClusterBody)
+	body.Res = new(types.PlaceVmsXClusterResponse)
+	hostRequired := req.PlacementSpec.HostRecommRequired != nil && *req.PlacementSpec.HostRecommRequired
+	datastoreRequired := req.PlacementSpec.DatastoreRecommRequired != nil && *req.PlacementSpec.DatastoreRecommRequired
+
+	for _, spec := range specs {
+
+		// The RelocateSpec must not be set.
+		if spec.RelocateSpec != nil {
+			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "relocateSpec"})
+			return body
+		}
+
+		// The name in the ConfigSpec must be set.
+		if spec.ConfigSpec.Name == "" {
+			body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "configSpec.name"})
+			return body
+		}
+
+		pool := ctx.Map.Get(pools[rand.Intn(len(pools))]).(*ResourcePool)
+		cluster := ctx.Map.Get(pool.Owner).(*ClusterComputeResource)
+
+		if len(cluster.Host) == 0 {
+			addPlacementFault(body, spec.ConfigSpec.Name, nil, pool.Self)
+			continue
+		}
+
+		reco := types.ClusterRecommendation{
+			Key:        "1",
+			Type:       "V1",
+			Time:       time.Now(),
+			Rating:     1,
+			Reason:     string(types.RecommendationReasonCodeXClusterPlacement),
+			ReasonText: string(types.RecommendationReasonCodeXClusterPlacement),
+			Target:     &cluster.Self,
+		}
+
+		placementAction := generateInitialPlacementAction(ctx, &spec.ConfigSpec, pool, cluster, hostRequired, datastoreRequired)
+
+		reco.Action = append(reco.Action, placementAction)
+
+		body.Res.Returnval.PlacementInfos = append(body.Res.Returnval.PlacementInfos,
+			types.PlaceVmsXClusterResultPlacementInfo{
+				VmName:         spec.ConfigSpec.Name,
+				Recommendation: reco,
+			},
+		)
+	}
+	return body
+}
+
 func (f *Folder) PlaceVmsXCluster(ctx *Context, req *types.PlaceVmsXCluster) soap.HasFault {
 	body := new(methods.PlaceVmsXClusterBody)
 
@@ -805,115 +1073,28 @@ func (f *Folder) PlaceVmsXCluster(ctx *Context, req *types.PlaceVmsXCluster) soa
 		clusters[pool.Owner] = struct{}{}
 	}
 
-	// MVP: Only a single VM is supported.
+	// MVP: Only a single VM placement spec is supported.
if len(specs) != 1 { body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "vmPlacementSpecs"}) return body } - for _, spec := range specs { - if spec.ConfigSpec.Name == "" { - body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "configSpec.name"}) - return body - } - } - - body.Res = new(types.PlaceVmsXClusterResponse) - hostRequired := req.PlacementSpec.HostRecommRequired != nil && *req.PlacementSpec.HostRecommRequired - datastoreRequired := req.PlacementSpec.DatastoreRecommRequired != nil && *req.PlacementSpec.DatastoreRecommRequired - - for _, spec := range specs { - pool := ctx.Map.Get(pools[rand.Intn(len(pools))]).(*ResourcePool) - cluster := ctx.Map.Get(pool.Owner).(*ClusterComputeResource) + placementType := types.PlaceVmsXClusterSpecPlacementType(req.PlacementSpec.PlacementType) - if len(cluster.Host) == 0 { - faults := types.PlaceVmsXClusterResultPlacementFaults{ - VmName: spec.ConfigSpec.Name, - ResourcePool: pool.Self, - Faults: []types.LocalizedMethodFault{ - { - Fault: &types.GenericDrsFault{}, - }, - }, - } - body.Res.Returnval.Faults = append(body.Res.Returnval.Faults, faults) - } else { - var configSpec *types.VirtualMachineConfigSpec - - res := types.ClusterRecommendation{ - Key: "1", - Type: "V1", - Time: time.Now(), - Rating: 1, - Reason: string(types.RecommendationReasonCodeXClusterPlacement), - ReasonText: string(types.RecommendationReasonCodeXClusterPlacement), - Target: &cluster.Self, - } - - placementAction := types.ClusterClusterInitialPlacementAction{ - Pool: pool.Self, - } - - if hostRequired { - randomHost := cluster.Host[rand.Intn(len(cluster.Host))] - placementAction.TargetHost = &randomHost - } - - if datastoreRequired { - configSpec = &spec.ConfigSpec - - // TODO: This is just an initial implementation aimed at returning some data but it is not - // necessarily fully consistent, like we should ensure the host, if also required, has the - // datastore mounted. - ds := ctx.Map.Get(cluster.Datastore[rand.Intn(len(cluster.Datastore))]).(*Datastore) - - if configSpec.Files == nil { - configSpec.Files = new(types.VirtualMachineFileInfo) - } - configSpec.Files.VmPathName = fmt.Sprintf("[%[1]s] %[2]s/%[2]s.vmx", ds.Name, spec.ConfigSpec.Name) - - for _, change := range configSpec.DeviceChange { - dspec := change.GetVirtualDeviceConfigSpec() - - if dspec.FileOperation != types.VirtualDeviceConfigSpecFileOperationCreate { - continue - } - - switch dspec.Operation { - case types.VirtualDeviceConfigSpecOperationAdd: - device := dspec.Device - d := device.GetVirtualDevice() - - switch device.(type) { - case *types.VirtualDisk: - switch b := d.Backing.(type) { - case types.BaseVirtualDeviceFileBackingInfo: - info := b.GetVirtualDeviceFileBackingInfo() - info.Datastore = types.NewReference(ds.Reference()) - - var dsPath object.DatastorePath - if dsPath.FromString(info.FileName) { - dsPath.Datastore = ds.Name - info.FileName = dsPath.String() - } - } - } - } - } - - placementAction.ConfigSpec = configSpec - } - - res.Action = append(res.Action, &placementAction) + // An empty placement type defaults to CreateAndPowerOn. 
+ if req.PlacementSpec.PlacementType == "" { + placementType = types.PlaceVmsXClusterSpecPlacementTypeCreateAndPowerOn + } - body.Res.Returnval.PlacementInfos = append(body.Res.Returnval.PlacementInfos, - types.PlaceVmsXClusterResultPlacementInfo{ - VmName: spec.ConfigSpec.Name, - Recommendation: res, - }, - ) - } + switch placementType { + case types.PlaceVmsXClusterSpecPlacementTypeCreateAndPowerOn: + return generateRecommendationForCreateAndPowerOn(ctx, req) + case types.PlaceVmsXClusterSpecPlacementTypeRelocate: + return generateRecommendationForRelocate(ctx, req) + case types.PlaceVmsXClusterSpecPlacementTypeReconfigure: + return generateRecommendationForReconfigure(ctx, req) } + body.Fault_ = Fault("", &types.InvalidArgument{InvalidProperty: "placementType"}) return body } diff --git a/simulator/folder_test.go b/simulator/folder_test.go index c898fbc9b..796cbb37a 100644 --- a/simulator/folder_test.go +++ b/simulator/folder_test.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -19,6 +19,7 @@ package simulator import ( "context" "reflect" + "strings" "testing" "github.com/vmware/govmomi" @@ -610,7 +611,7 @@ func TestFolderCreateDVS(t *testing.T) { } } -func TestPlaceVmsXCluster(t *testing.T) { +func TestPlaceVmsXClusterCreateAndPowerOn(t *testing.T) { vpx := VPX() vpx.Cluster = 3 @@ -645,3 +646,224 @@ func TestPlaceVmsXCluster(t *testing.T) { } }, vpx) } + +func TestPlaceVmsXClusterRelocate(t *testing.T) { + vpx := VPX() + vpx.Cluster = 3 + + Test(func(ctx context.Context, c *vim25.Client) { + finder := find.NewFinder(c, true) + datacenter, err := finder.DefaultDatacenter(ctx) + if err != nil { + t.Fatalf("failed to get default datacenter: %v", err) + } + finder.SetDatacenter(datacenter) + + pools, err := finder.ResourcePoolList(ctx, "/DC0/host/DC0_C*/*") + if err != nil { + t.Fatal(err) + } + + var poolMoRefs []types.ManagedObjectReference + for _, pool := range pools { + poolMoRefs = append(poolMoRefs, pool.Reference()) + } + + vmMoRef := Map.Any("VirtualMachine").(*VirtualMachine).Reference() + + cfgSpec := types.VirtualMachineConfigSpec{} + + tests := []struct { + name string + poolMoRefs []types.ManagedObjectReference + configSpec types.VirtualMachineConfigSpec + relocateSpec *types.VirtualMachineRelocateSpec + vmMoRef *types.ManagedObjectReference + expectedErr string + }{ + { + "relocate without any resource pools", + nil, + cfgSpec, + &types.VirtualMachineRelocateSpec{}, + &vmMoRef, + "InvalidArgument", + }, + { + "relocate without a relocate spec", + poolMoRefs, + cfgSpec, + nil, + &vmMoRef, + "InvalidArgument", + }, + { + "relocate without a vm in the placement spec", + poolMoRefs, + cfgSpec, + &types.VirtualMachineRelocateSpec{}, + nil, + "InvalidArgument", + }, + { + "relocate with a non-existing vm in the placement spec", + poolMoRefs, + cfgSpec, + &types.VirtualMachineRelocateSpec{}, + &types.ManagedObjectReference{ + Type: "VirtualMachine", + Value: "fake-vm-999", + }, + "InvalidArgument", + }, + { + "relocate with an empty relocate spec", + poolMoRefs, + cfgSpec, + 
&types.VirtualMachineRelocateSpec{}, + &vmMoRef, + "", + }, + } + + for testNo, test := range tests { + test := test // assign to local var since loop var is reused + + placeVmsXClusterSpec := types.PlaceVmsXClusterSpec{ + ResourcePools: test.poolMoRefs, + PlacementType: string(types.PlaceVmsXClusterSpecPlacementTypeRelocate), + } + + placeVmsXClusterSpec.VmPlacementSpecs = []types.PlaceVmsXClusterSpecVmPlacementSpec{{ + ConfigSpec: test.configSpec, + Vm: test.vmMoRef, + RelocateSpec: test.relocateSpec, + }} + + folder := object.NewRootFolder(c) + res, err := folder.PlaceVmsXCluster(ctx, placeVmsXClusterSpec) + + if err == nil && test.expectedErr != "" { + t.Fatalf("Test %v: expected error %q, received nil", testNo, test.expectedErr) + } else if err != nil && + (test.expectedErr == "" || !strings.Contains(err.Error(), test.expectedErr)) { + t.Fatalf("Test %v: expected error %q, received %v", testNo, test.expectedErr, err) + } + + if err == nil { + if len(res.PlacementInfos) != len(placeVmsXClusterSpec.VmPlacementSpecs) { + t.Errorf("%d PlacementInfos vs %d VmPlacementSpecs", len(res.PlacementInfos), len(placeVmsXClusterSpec.VmPlacementSpecs)) + } + } + } + }, vpx) +} + +func TestPlaceVmsXClusterReconfigure(t *testing.T) { + vpx := VPX() + // All hosts are cluster hosts + vpx.Host = 0 + + Test(func(ctx context.Context, c *vim25.Client) { + finder := find.NewFinder(c, true) + datacenter, err := finder.DefaultDatacenter(ctx) + if err != nil { + t.Fatalf("failed to get default datacenter: %v", err) + } + finder.SetDatacenter(datacenter) + + vm := Map.Any("VirtualMachine").(*VirtualMachine) + host := Map.Get(vm.Runtime.Host.Reference()).(*HostSystem) + cluster := Map.Get(*host.Parent).(*ClusterComputeResource) + pool := Map.Get(*cluster.ResourcePool).(*ResourcePool) + + var poolMoRefs []types.ManagedObjectReference + poolMoRefs = append(poolMoRefs, pool.Reference()) + + cfgSpec := types.VirtualMachineConfigSpec{} + + tests := []struct { + name string + poolMoRefs []types.ManagedObjectReference + configSpec types.VirtualMachineConfigSpec + relocateSpec *types.VirtualMachineRelocateSpec + vmMoRef *types.ManagedObjectReference + expectedErr string + }{ + { + "reconfigure without any resource pools", + nil, + cfgSpec, + nil, + &vm.Self, + "InvalidArgument", + }, + { + "reconfigure with a relocate spec", + poolMoRefs, + cfgSpec, + &types.VirtualMachineRelocateSpec{}, + &vm.Self, + "InvalidArgument", + }, + { + "reconfigure without a vm in the placement spec", + poolMoRefs, + cfgSpec, + nil, + nil, + "InvalidArgument", + }, + { + "reconfigure with a non-existing vm in the placement spec", + poolMoRefs, + cfgSpec, + nil, + &types.ManagedObjectReference{ + Type: "VirtualMachine", + Value: "fake-vm-999", + }, + "InvalidArgument", + }, + { + "reconfigure with an empty config spec", + poolMoRefs, + cfgSpec, + nil, + &vm.Self, + "", + }, + } + + for testNo, test := range tests { + test := test // assign to local var since loop var is reused + + placeVmsXClusterSpec := types.PlaceVmsXClusterSpec{ + ResourcePools: test.poolMoRefs, + PlacementType: string(types.PlaceVmsXClusterSpecPlacementTypeReconfigure), + } + + placeVmsXClusterSpec.VmPlacementSpecs = []types.PlaceVmsXClusterSpecVmPlacementSpec{{ + ConfigSpec: test.configSpec, + Vm: test.vmMoRef, + RelocateSpec: test.relocateSpec, + }} + + folder := object.NewRootFolder(c) + res, err := folder.PlaceVmsXCluster(ctx, placeVmsXClusterSpec) + + if err == nil && test.expectedErr != "" { + t.Fatalf("Test %v: expected error %q, received nil", testNo, 
test.expectedErr) + } else if err != nil && + (test.expectedErr == "" || !strings.Contains(err.Error(), test.expectedErr)) { + t.Fatalf("Test %v: expected error %q, received %v", testNo, test.expectedErr, err) + } + + if err == nil { + if len(res.PlacementInfos) != len(placeVmsXClusterSpec.VmPlacementSpecs) { + t.Errorf("%d PlacementInfos vs %d VmPlacementSpecs", len(res.PlacementInfos), len(placeVmsXClusterSpec.VmPlacementSpecs)) + } + } + } + }, vpx) +} diff --git a/vim25/types/unreleased.go b/vim25/types/unreleased.go index 72bc1082b..57bc82a1b 100644 --- a/vim25/types/unreleased.go +++ b/vim25/types/unreleased.go @@ -1,17 +1,17 @@ /* - Copyright (c) 2022 VMware, Inc. All Rights Reserved. +Copyright (c) 2022-2024 VMware, Inc. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ package types @@ -75,9 +75,10 @@ func init() { type PlaceVmsXClusterResultPlacementFaults struct { DynamicData - ResourcePool ManagedObjectReference `xml:"resourcePool"` - VmName string `xml:"vmName"` - Faults []LocalizedMethodFault `xml:"faults,omitempty"` + ResourcePool ManagedObjectReference `xml:"resourcePool"` + VmName string `xml:"vmName"` + Faults []LocalizedMethodFault `xml:"faults,omitempty"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` } func init() { @@ -87,18 +88,48 @@ func init() { type PlaceVmsXClusterResultPlacementInfo struct { DynamicData - VmName string `xml:"vmName"` - Recommendation ClusterRecommendation `xml:"recommendation"` + VmName string `xml:"vmName"` + Recommendation ClusterRecommendation `xml:"recommendation"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` } func init() { t["PlaceVmsXClusterResultPlacementInfo"] = reflect.TypeOf((*PlaceVmsXClusterResultPlacementInfo)(nil)).Elem() } +// Defines the type of placement +type PlaceVmsXClusterSpecPlacementType string + +const ( + // Create a new VM to be powered On + PlaceVmsXClusterSpecPlacementTypeCreateAndPowerOn = PlaceVmsXClusterSpecPlacementType("createAndPowerOn") + // Reconfigure a VM + PlaceVmsXClusterSpecPlacementTypeReconfigure = PlaceVmsXClusterSpecPlacementType("reconfigure") + // Relocate a VM + PlaceVmsXClusterSpecPlacementTypeRelocate = PlaceVmsXClusterSpecPlacementType("relocate") +) + +func (e PlaceVmsXClusterSpecPlacementType) Values() []PlaceVmsXClusterSpecPlacementType { + return []PlaceVmsXClusterSpecPlacementType{ + PlaceVmsXClusterSpecPlacementTypeCreateAndPowerOn, + PlaceVmsXClusterSpecPlacementTypeReconfigure, + PlaceVmsXClusterSpecPlacementTypeRelocate, + } +} + +func (e PlaceVmsXClusterSpecPlacementType) Strings() []string { + return 
EnumValuesAsStrings(e.Values()) +} + +func init() { + t["PlaceVmsXClusterSpecPlacementType"] = reflect.TypeOf((*PlaceVmsXClusterSpecPlacementType)(nil)).Elem() +} + type PlaceVmsXClusterSpec struct { DynamicData ResourcePools []ManagedObjectReference `xml:"resourcePools,omitempty"` + PlacementType string `xml:"placementType,omitempty"` VmPlacementSpecs []PlaceVmsXClusterSpecVmPlacementSpec `xml:"vmPlacementSpecs,omitempty"` HostRecommRequired *bool `xml:"hostRecommRequired"` DatastoreRecommRequired *bool `xml:"datastoreRecommRequired"` @@ -111,7 +142,9 @@ func init() { type PlaceVmsXClusterSpecVmPlacementSpec struct { DynamicData - ConfigSpec VirtualMachineConfigSpec `xml:"configSpec"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` + ConfigSpec VirtualMachineConfigSpec `xml:"configSpec"` + RelocateSpec *VirtualMachineRelocateSpec `xml:"relocateSpec,omitempty"` } func init() { @@ -119,3 +152,25 @@ func init() { } const RecommendationReasonCodeXClusterPlacement = RecommendationReasonCode("xClusterPlacement") + +type ClusterReconfigurePlacementAction struct { + ClusterAction + TargetHost *ManagedObjectReference `xml:"targetHost,omitempty"` + Pool ManagedObjectReference `xml:"pool"` + ConfigSpec *VirtualMachineConfigSpec `xml:"configSpec,omitempty"` +} + +func init() { + t["ClusterReconfigurePlacementAction"] = reflect.TypeOf((*ClusterReconfigurePlacementAction)(nil)).Elem() +} + +type ClusterRelocatePlacementAction struct { + ClusterAction + TargetHost *ManagedObjectReference `xml:"targetHost,omitempty"` + Pool ManagedObjectReference `xml:"pool"` + RelocateSpec *VirtualMachineRelocateSpec `xml:"relocateSpec,omitempty"` +} + +func init() { + t["ClusterRelocatePlacementAction"] = reflect.TypeOf((*ClusterRelocatePlacementAction)(nil)).Elem() +}
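
For reference, and not part of the patch itself: a minimal client-side sketch of how the new placementType field could be exercised against vcsim, modeled on TestPlaceVmsXClusterRelocate above. The DC0_C0_RP0_VM0 inventory path assumes the default VPX model, and simulator.Run, types.NewBool, and the find/object helpers are pre-existing govmomi APIs rather than additions from this change; error handling is illustrative only.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/vmware/govmomi/find"
	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/simulator"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Run against an in-process vcsim instance with the default VPX inventory.
	err := simulator.Run(func(ctx context.Context, c *vim25.Client) error {
		finder := find.NewFinder(c, true)
		dc, err := finder.DefaultDatacenter(ctx)
		if err != nil {
			return err
		}
		finder.SetDatacenter(dc)

		// Candidate resource pools for the cross-cluster placement.
		pools, err := finder.ResourcePoolList(ctx, "/DC0/host/DC0_C*/*")
		if err != nil {
			return err
		}
		var poolRefs []types.ManagedObjectReference
		for _, p := range pools {
			poolRefs = append(poolRefs, p.Reference())
		}

		// An existing VM to relocate (name from the default vcsim inventory).
		vm, err := finder.VirtualMachine(ctx, "DC0_C0_RP0_VM0")
		if err != nil {
			return err
		}
		vmRef := vm.Reference()

		spec := types.PlaceVmsXClusterSpec{
			ResourcePools: poolRefs,
			PlacementType: string(types.PlaceVmsXClusterSpecPlacementTypeRelocate),
			VmPlacementSpecs: []types.PlaceVmsXClusterSpecVmPlacementSpec{{
				Vm:           &vmRef,
				ConfigSpec:   types.VirtualMachineConfigSpec{},
				RelocateSpec: &types.VirtualMachineRelocateSpec{},
			}},
			HostRecommRequired:      types.NewBool(true),
			DatastoreRecommRequired: types.NewBool(true),
		}

		// Ask DRS (here, the simulator) for a placement recommendation.
		res, err := object.NewRootFolder(c).PlaceVmsXCluster(ctx, spec)
		if err != nil {
			return err
		}

		for _, info := range res.PlacementInfos {
			fmt.Println(info.VmName, info.Recommendation.Target)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}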