diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_compute_management_client.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_compute_management_client.py index 83a6973bd51a..5b58a2adf72f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_compute_management_client.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_compute_management_client.py @@ -46,21 +46,37 @@ class ComputeManagementClient(MultiApiClientMixin, SDKClient): :type profile: azure.profiles.KnownProfiles """ - DEFAULT_API_VERSION = '2020-06-01' + DEFAULT_API_VERSION = '2020-06-30' _PROFILE_TAG = "azure.mgmt.compute.ComputeManagementClient" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, - 'disk_accesses': '2020-05-01', - 'disk_encryption_sets': '2020-05-01', - 'disks': '2020-05-01', + 'availability_sets': '2020-06-01', + 'dedicated_host_groups': '2020-06-01', + 'dedicated_hosts': '2020-06-01', 'galleries': '2019-12-01', 'gallery_application_versions': '2019-12-01', 'gallery_applications': '2019-12-01', 'gallery_image_versions': '2019-12-01', 'gallery_images': '2019-12-01', + 'images': '2020-06-01', + 'log_analytics': '2020-06-01', + 'operations': '2020-06-01', + 'proximity_placement_groups': '2020-06-01', 'resource_skus': '2019-04-01', - 'snapshots': '2020-05-01', + 'ssh_public_keys': '2020-06-01', + 'usage': '2020-06-01', + 'virtual_machine_extension_images': '2020-06-01', + 'virtual_machine_extensions': '2020-06-01', + 'virtual_machine_images': '2020-06-01', + 'virtual_machine_run_commands': '2020-06-01', + 'virtual_machine_scale_set_extensions': '2020-06-01', + 'virtual_machine_scale_set_rolling_upgrades': '2020-06-01', + 'virtual_machine_scale_set_vm_extensions': '2020-06-01', + 'virtual_machine_scale_set_vms': '2020-06-01', + 'virtual_machine_scale_sets': '2020-06-01', + 'virtual_machine_sizes': '2020-06-01', + 'virtual_machines': '2020-06-01', }}, _PROFILE_TAG + " latest" ) @@ -99,6 +115,7 @@ def models(cls, api_version=DEFAULT_API_VERSION): * 2019-12-01: :mod:`v2019_12_01.models` * 2020-05-01: :mod:`v2020_05_01.models` * 2020-06-01: :mod:`v2020_06_01.models` + * 2020-06-30: :mod:`v2020_06_30.models` """ if api_version == '2015-06-15': from .v2015_06_15 import models @@ -151,6 +168,9 @@ def models(cls, api_version=DEFAULT_API_VERSION): elif api_version == '2020-06-01': from .v2020_06_01 import models return models + elif api_version == '2020-06-30': + from .v2020_06_30 import models + return models raise NotImplementedError("APIVersion {} is not available".format(api_version)) @property @@ -248,10 +268,13 @@ def disk_accesses(self): """Instance depends on the API version: * 2020-05-01: :class:`DiskAccessesOperations` + * 2020-06-30: :class:`DiskAccessesOperations` """ api_version = self._get_api_version('disk_accesses') if api_version == '2020-05-01': from .v2020_05_01.operations import DiskAccessesOperations as OperationClass + elif api_version == '2020-06-30': + from .v2020_06_30.operations import DiskAccessesOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @@ -263,6 +286,7 @@ def disk_encryption_sets(self): * 2019-07-01: :class:`DiskEncryptionSetsOperations` * 2019-11-01: :class:`DiskEncryptionSetsOperations` * 2020-05-01: :class:`DiskEncryptionSetsOperations` + * 2020-06-30: :class:`DiskEncryptionSetsOperations` """ api_version = 
self._get_api_version('disk_encryption_sets') if api_version == '2019-07-01': @@ -271,6 +295,8 @@ def disk_encryption_sets(self): from .v2019_11_01.operations import DiskEncryptionSetsOperations as OperationClass elif api_version == '2020-05-01': from .v2020_05_01.operations import DiskEncryptionSetsOperations as OperationClass + elif api_version == '2020-06-30': + from .v2020_06_30.operations import DiskEncryptionSetsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @@ -288,6 +314,7 @@ def disks(self): * 2019-07-01: :class:`DisksOperations` * 2019-11-01: :class:`DisksOperations` * 2020-05-01: :class:`DisksOperations` + * 2020-06-30: :class:`DisksOperations` """ api_version = self._get_api_version('disks') if api_version == '2016-04-30-preview': @@ -308,6 +335,8 @@ def disks(self): from .v2019_11_01.operations import DisksOperations as OperationClass elif api_version == '2020-05-01': from .v2020_05_01.operations import DisksOperations as OperationClass + elif api_version == '2020-06-30': + from .v2020_06_30.operations import DisksOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @@ -587,6 +616,7 @@ def snapshots(self): * 2019-07-01: :class:`SnapshotsOperations` * 2019-11-01: :class:`SnapshotsOperations` * 2020-05-01: :class:`SnapshotsOperations` + * 2020-06-30: :class:`SnapshotsOperations` """ api_version = self._get_api_version('snapshots') if api_version == '2016-04-30-preview': @@ -607,6 +637,8 @@ def snapshots(self): from .v2019_11_01.operations import SnapshotsOperations as OperationClass elif api_version == '2020-05-01': from .v2020_05_01.operations import SnapshotsOperations as OperationClass + elif api_version == '2020-06-30': + from .v2020_06_30.operations import SnapshotsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/models.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/models.py index 7b1c6dc05c94..5d376ed3e899 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/models.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/models.py @@ -6,5 +6,5 @@ # -------------------------------------------------------------------------- from .v2019_04_01.models import * from .v2019_12_01.models import * -from .v2020_05_01.models import * from .v2020_06_01.models import * +from .v2020_06_30.models import * diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/__init__.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/__init__.py index 9e4583068493..4cf5e17b2d24 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/__init__.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/__init__.py @@ -20,6 +20,7 @@ from ._models_py3 import AutomaticRepairsPolicy from ._models_py3 import AvailabilitySet from ._models_py3 import AvailabilitySetUpdate + from ._models_py3 import AvailablePatchSummary from 
._models_py3 import BillingProfile from ._models_py3 import BootDiagnostics from ._models_py3 import BootDiagnosticsInstanceView @@ -53,6 +54,7 @@ from ._models_py3 import InstanceViewStatus from ._models_py3 import KeyVaultKeyReference from ._models_py3 import KeyVaultSecretReference + from ._models_py3 import LastPatchInstallationSummary from ._models_py3 import LinuxConfiguration from ._models_py3 import LogAnalyticsInputBase from ._models_py3 import LogAnalyticsOperationResult @@ -128,6 +130,7 @@ from ._models_py3 import VirtualMachineImage from ._models_py3 import VirtualMachineImageResource from ._models_py3 import VirtualMachineInstanceView + from ._models_py3 import VirtualMachinePatchStatus from ._models_py3 import VirtualMachineReimageParameters from ._models_py3 import VirtualMachineScaleSet from ._models_py3 import VirtualMachineScaleSetDataDisk @@ -189,6 +192,7 @@ from ._models import AutomaticRepairsPolicy from ._models import AvailabilitySet from ._models import AvailabilitySetUpdate + from ._models import AvailablePatchSummary from ._models import BillingProfile from ._models import BootDiagnostics from ._models import BootDiagnosticsInstanceView @@ -222,6 +226,7 @@ from ._models import InstanceViewStatus from ._models import KeyVaultKeyReference from ._models import KeyVaultSecretReference + from ._models import LastPatchInstallationSummary from ._models import LinuxConfiguration from ._models import LogAnalyticsInputBase from ._models import LogAnalyticsOperationResult @@ -297,6 +302,7 @@ from ._models import VirtualMachineImage from ._models import VirtualMachineImageResource from ._models import VirtualMachineInstanceView + from ._models import VirtualMachinePatchStatus from ._models import VirtualMachineReimageParameters from ._models import VirtualMachineScaleSet from ._models import VirtualMachineScaleSetDataDisk @@ -390,6 +396,7 @@ ResourceIdentityType, MaintenanceOperationResultCodeTypes, HyperVGenerationType, + RebootStatus, UpgradeMode, VirtualMachineScaleSetScaleInRules, OperatingSystemStateTypes, @@ -417,6 +424,7 @@ 'AutomaticRepairsPolicy', 'AvailabilitySet', 'AvailabilitySetUpdate', + 'AvailablePatchSummary', 'BillingProfile', 'BootDiagnostics', 'BootDiagnosticsInstanceView', @@ -450,6 +458,7 @@ 'InstanceViewStatus', 'KeyVaultKeyReference', 'KeyVaultSecretReference', + 'LastPatchInstallationSummary', 'LinuxConfiguration', 'LogAnalyticsInputBase', 'LogAnalyticsOperationResult', @@ -525,6 +534,7 @@ 'VirtualMachineImage', 'VirtualMachineImageResource', 'VirtualMachineInstanceView', + 'VirtualMachinePatchStatus', 'VirtualMachineReimageParameters', 'VirtualMachineScaleSet', 'VirtualMachineScaleSetDataDisk', @@ -617,6 +627,7 @@ 'ResourceIdentityType', 'MaintenanceOperationResultCodeTypes', 'HyperVGenerationType', + 'RebootStatus', 'UpgradeMode', 'VirtualMachineScaleSetScaleInRules', 'OperatingSystemStateTypes', diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_compute_management_client_enums.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_compute_management_client_enums.py index 43221c71ff12..d29fbdf55093 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_compute_management_client_enums.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_compute_management_client_enums.py @@ -348,6 +348,15 @@ class HyperVGenerationType(str, Enum): v2 = "V2" +class RebootStatus(str, Enum): + + not_needed = "NotNeeded" + required = "Required" + started = "Started" + 
failed = "Failed" + completed = "Completed" + + class UpgradeMode(str, Enum): automatic = "Automatic" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_models.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_models.py index f7c6a44da03b..efa163dc3795 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_models.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_models.py @@ -419,6 +419,77 @@ def __init__(self, **kwargs): self.sku = kwargs.get('sku', None) +class AvailablePatchSummary(Model): + """Describes the properties of an virtual machine instance view for available + patch summary. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar status: The overall success or failure status of the operation. It + remains "InProgress" until the operation completes. At that point it will + become "Failed", "Succeeded", or "CompletedWithWarnings.". Possible values + include: 'InProgress', 'Failed', 'Succeeded', 'CompletedWithWarnings' + :vartype status: str or + ~azure.mgmt.compute.v2020_06_01.models.PatchOperationStatus + :ivar assessment_activity_id: The activity ID of the operation that + produced this result. It is used to correlate across CRP and extension + logs. + :vartype assessment_activity_id: str + :ivar reboot_pending: The overall reboot status of the VM. It will be true + when partially installed patches require a reboot to complete installation + but the reboot has not yet occurred. + :vartype reboot_pending: bool + :ivar critical_and_security_patch_count: The number of critical or + security patches that have been detected as available and not yet + installed. + :vartype critical_and_security_patch_count: int + :ivar other_patch_count: The number of all available patches excluding + critical and security. + :vartype other_patch_count: int + :ivar start_time: The UTC timestamp when the operation began. + :vartype start_time: datetime + :ivar last_modified_time: The UTC timestamp when the operation began. + :vartype last_modified_time: datetime + :ivar error: The errors that were encountered during execution of the + operation. The details array contains the list of them. 
+ :vartype error: ~azure.mgmt.compute.v2020_06_01.models.ApiError + """ + + _validation = { + 'status': {'readonly': True}, + 'assessment_activity_id': {'readonly': True}, + 'reboot_pending': {'readonly': True}, + 'critical_and_security_patch_count': {'readonly': True}, + 'other_patch_count': {'readonly': True}, + 'start_time': {'readonly': True}, + 'last_modified_time': {'readonly': True}, + 'error': {'readonly': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'assessment_activity_id': {'key': 'assessmentActivityId', 'type': 'str'}, + 'reboot_pending': {'key': 'rebootPending', 'type': 'bool'}, + 'critical_and_security_patch_count': {'key': 'criticalAndSecurityPatchCount', 'type': 'int'}, + 'other_patch_count': {'key': 'otherPatchCount', 'type': 'int'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_modified_time': {'key': 'lastModifiedTime', 'type': 'iso-8601'}, + 'error': {'key': 'error', 'type': 'ApiError'}, + } + + def __init__(self, **kwargs): + super(AvailablePatchSummary, self).__init__(**kwargs) + self.status = None + self.assessment_activity_id = None + self.reboot_pending = None + self.critical_and_security_patch_count = None + self.other_patch_count = None + self.start_time = None + self.last_modified_time = None + self.error = None + + class BillingProfile(Model): """Specifies the billing related details of a Azure Spot VM or VMSS.

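With the profile change above, 2020-06-30 becomes the default API version and the disk-related operation groups (disk_accesses, disk_encryption_sets, disks, snapshots) are no longer pinned to 2020-05-01, so they resolve to the new default. A minimal usage sketch, assuming service-principal credentials and placeholder names that are not part of this diff:

# Sketch only: shows how the multi-API client resolves the new default API
# version after this change; all credential values and names are placeholders.
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient

credentials = ServicePrincipalCredentials(
    client_id="<app-id>", secret="<app-secret>", tenant="<tenant-id>")

# With the latest profile, disks/snapshots/disk_accesses/disk_encryption_sets
# now fall through to DEFAULT_API_VERSION = '2020-06-30'.
client = ComputeManagementClient(credentials, "<subscription-id>")
for disk in client.disks.list():
    print(disk.name, disk.disk_size_gb)

# A specific version can still be pinned for the whole client if needed.
pinned = ComputeManagementClient(
    credentials, "<subscription-id>", api_version="2020-06-30")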
Minimum api-version: 2019-03-01. @@ -1875,6 +1946,110 @@ def __init__(self, **kwargs): self.source_vault = kwargs.get('source_vault', None) +class LastPatchInstallationSummary(Model): + """Describes the properties of the last installed patch summary. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar status: The overall success or failure status of the operation. It + remains "InProgress" until the operation completes. At that point it will + become "Failed", "Succeeded", or "CompletedWithWarnings.". Possible values + include: 'InProgress', 'Failed', 'Succeeded', 'CompletedWithWarnings' + :vartype status: str or + ~azure.mgmt.compute.v2020_06_01.models.PatchOperationStatus + :ivar installation_activity_id: The activity ID of the operation that + produced this result. It is used to correlate across CRP and extension + logs. + :vartype installation_activity_id: str + :ivar maintenance_window_exceeded: Describes whether the operation ran out + of time before it completed all its intended actions + :vartype maintenance_window_exceeded: bool + :ivar reboot_status: The reboot status of the machine after the patch + operation. It will be in "NotNeeded" status if reboot is not needed after + the patch operation. "Required" will be the status once the patch is + applied and machine is required to reboot. "Started" will be the reboot + status when the machine has started to reboot. "Failed" will be the status + if the machine is failed to reboot. "Completed" will be the status once + the machine is rebooted successfully. Possible values include: + 'NotNeeded', 'Required', 'Started', 'Failed', 'Completed' + :vartype reboot_status: str or + ~azure.mgmt.compute.v2020_06_01.models.RebootStatus + :ivar not_selected_patch_count: The number of all available patches but + not going to be installed because it didn't match a classification or + inclusion list entry. + :vartype not_selected_patch_count: int + :ivar excluded_patch_count: The number of all available patches but + excluded explicitly by a customer-specified exclusion list match. + :vartype excluded_patch_count: int + :ivar pending_patch_count: The number of all available patches expected to + be installed over the course of the patch installation operation. + :vartype pending_patch_count: int + :ivar installed_patch_count: The count of patches that successfully + installed. + :vartype installed_patch_count: int + :ivar failed_patch_count: The count of patches that failed installation. + :vartype failed_patch_count: int + :ivar start_time: The UTC timestamp when the operation began. + :vartype start_time: datetime + :ivar last_modified_time: The UTC timestamp when the operation began. + :vartype last_modified_time: datetime + :ivar started_by: The person or system account that started the operation + :vartype started_by: str + :ivar error: The errors that were encountered during execution of the + operation. The details array contains the list of them. 
+ :vartype error: ~azure.mgmt.compute.v2020_06_01.models.ApiError + """ + + _validation = { + 'status': {'readonly': True}, + 'installation_activity_id': {'readonly': True}, + 'maintenance_window_exceeded': {'readonly': True}, + 'reboot_status': {'readonly': True}, + 'not_selected_patch_count': {'readonly': True}, + 'excluded_patch_count': {'readonly': True}, + 'pending_patch_count': {'readonly': True}, + 'installed_patch_count': {'readonly': True}, + 'failed_patch_count': {'readonly': True}, + 'start_time': {'readonly': True}, + 'last_modified_time': {'readonly': True}, + 'started_by': {'readonly': True}, + 'error': {'readonly': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'installation_activity_id': {'key': 'installationActivityId', 'type': 'str'}, + 'maintenance_window_exceeded': {'key': 'maintenanceWindowExceeded', 'type': 'bool'}, + 'reboot_status': {'key': 'rebootStatus', 'type': 'str'}, + 'not_selected_patch_count': {'key': 'notSelectedPatchCount', 'type': 'int'}, + 'excluded_patch_count': {'key': 'excludedPatchCount', 'type': 'int'}, + 'pending_patch_count': {'key': 'pendingPatchCount', 'type': 'int'}, + 'installed_patch_count': {'key': 'installedPatchCount', 'type': 'int'}, + 'failed_patch_count': {'key': 'failedPatchCount', 'type': 'int'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_modified_time': {'key': 'lastModifiedTime', 'type': 'iso-8601'}, + 'started_by': {'key': 'startedBy', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'ApiError'}, + } + + def __init__(self, **kwargs): + super(LastPatchInstallationSummary, self).__init__(**kwargs) + self.status = None + self.installation_activity_id = None + self.maintenance_window_exceeded = None + self.reboot_status = None + self.not_selected_patch_count = None + self.excluded_patch_count = None + self.pending_patch_count = None + self.installed_patch_count = None + self.failed_patch_count = None + self.start_time = None + self.last_modified_time = None + self.started_by = None + self.error = None + + class LinuxConfiguration(Model): """Specifies the Linux operating system settings on the virtual machine.

For a list of supported Linux distributions, see [Linux on
@@ -2419,7 +2594,7 @@ class PatchSettings(Model):
     automatically be updated by the OS. The property
     WindowsConfiguration.enableAutomaticUpdates must be true.

** AutomaticByPlatform** - the virtual machine will automatically updated by - the OS. The properties provisionVMAgent and + the platform. The properties provisionVMAgent and WindowsConfiguration.enableAutomaticUpdates must be true. Possible values include: 'Manual', 'AutomaticByOS', 'AutomaticByPlatform' :type patch_mode: str or @@ -4254,6 +4429,10 @@ class VirtualMachineExtension(Resource): deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. :type auto_upgrade_minor_version: bool + :param enable_automatic_upgrade: Indicates whether the extension should be + automatically upgraded by the platform if there is a newer version of the + extension available. + :type enable_automatic_upgrade: bool :param settings: Json formatted public settings for the extension. :type settings: object :param protected_settings: The extension can contain either @@ -4287,6 +4466,7 @@ class VirtualMachineExtension(Resource): 'virtual_machine_extension_type': {'key': 'properties.type', 'type': 'str'}, 'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'}, 'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'}, + 'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'}, 'settings': {'key': 'properties.settings', 'type': 'object'}, 'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, @@ -4300,6 +4480,7 @@ def __init__(self, **kwargs): self.virtual_machine_extension_type = kwargs.get('virtual_machine_extension_type', None) self.type_handler_version = kwargs.get('type_handler_version', None) self.auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', None) + self.enable_automatic_upgrade = kwargs.get('enable_automatic_upgrade', None) self.settings = kwargs.get('settings', None) self.protected_settings = kwargs.get('protected_settings', None) self.provisioning_state = None @@ -4472,6 +4653,10 @@ class VirtualMachineExtensionUpdate(UpdateResource): deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. :type auto_upgrade_minor_version: bool + :param enable_automatic_upgrade: Indicates whether the extension should be + automatically upgraded by the platform if there is a newer version of the + extension available. + :type enable_automatic_upgrade: bool :param settings: Json formatted public settings for the extension. 
:type settings: object :param protected_settings: The extension can contain either @@ -4487,6 +4672,7 @@ class VirtualMachineExtensionUpdate(UpdateResource): 'type': {'key': 'properties.type', 'type': 'str'}, 'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'}, 'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'}, + 'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'}, 'settings': {'key': 'properties.settings', 'type': 'object'}, 'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'}, } @@ -4498,6 +4684,7 @@ def __init__(self, **kwargs): self.type = kwargs.get('type', None) self.type_handler_version = kwargs.get('type_handler_version', None) self.auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', None) + self.enable_automatic_upgrade = kwargs.get('enable_automatic_upgrade', None) self.settings = kwargs.get('settings', None) self.protected_settings = kwargs.get('protected_settings', None) @@ -4737,6 +4924,9 @@ class VirtualMachineInstanceView(Model): :param extensions: The extensions information. :type extensions: list[~azure.mgmt.compute.v2020_06_01.models.VirtualMachineExtensionInstanceView] + :ivar vm_health: The health status for the VM. + :vartype vm_health: + ~azure.mgmt.compute.v2020_06_01.models.VirtualMachineHealthStatus :param boot_diagnostics: Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

You can easily view the output of your console log.

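The PatchSettings docstring correction above clarifies that AutomaticByPlatform patching is orchestrated by the platform rather than the OS. A hedged sketch of opting into that mode with the v2020_06_01 models (the values shown are illustrative only):

# Sketch only: builds a Windows OS configuration that selects
# platform-orchestrated patching. Per the docstring above, both
# provisionVMAgent and enableAutomaticUpdates must be true for this mode.
from azure.mgmt.compute.v2020_06_01.models import PatchSettings, WindowsConfiguration

windows_configuration = WindowsConfiguration(
    provision_vm_agent=True,
    enable_automatic_updates=True,
    patch_settings=PatchSettings(patch_mode="AutomaticByPlatform"),
)
# This WindowsConfiguration would then be attached to an OSProfile when
# creating or updating a virtual machine.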
@@ -4751,9 +4941,13 @@ class VirtualMachineInstanceView(Model): :param statuses: The resource status information. :type statuses: list[~azure.mgmt.compute.v2020_06_01.models.InstanceViewStatus] + :param patch_status: The status of virtual machine patch operations. + :type patch_status: + ~azure.mgmt.compute.v2020_06_01.models.VirtualMachinePatchStatus """ _validation = { + 'vm_health': {'readonly': True}, 'assigned_host': {'readonly': True}, } @@ -4769,9 +4963,11 @@ class VirtualMachineInstanceView(Model): 'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'}, 'disks': {'key': 'disks', 'type': '[DiskInstanceView]'}, 'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'}, + 'vm_health': {'key': 'vmHealth', 'type': 'VirtualMachineHealthStatus'}, 'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'}, 'assigned_host': {'key': 'assignedHost', 'type': 'str'}, 'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'}, + 'patch_status': {'key': 'patchStatus', 'type': 'VirtualMachinePatchStatus'}, } def __init__(self, **kwargs): @@ -4787,9 +4983,35 @@ def __init__(self, **kwargs): self.maintenance_redeploy_status = kwargs.get('maintenance_redeploy_status', None) self.disks = kwargs.get('disks', None) self.extensions = kwargs.get('extensions', None) + self.vm_health = None self.boot_diagnostics = kwargs.get('boot_diagnostics', None) self.assigned_host = None self.statuses = kwargs.get('statuses', None) + self.patch_status = kwargs.get('patch_status', None) + + +class VirtualMachinePatchStatus(Model): + """The status of virtual machine patch operations. + + :param available_patch_summary: The available patch summary of the latest + assessment operation for the virtual machine. + :type available_patch_summary: + ~azure.mgmt.compute.v2020_06_01.models.AvailablePatchSummary + :param last_patch_installation_summary: The installation summary of the + latest installation operation for the virtual machine. + :type last_patch_installation_summary: + ~azure.mgmt.compute.v2020_06_01.models.LastPatchInstallationSummary + """ + + _attribute_map = { + 'available_patch_summary': {'key': 'availablePatchSummary', 'type': 'AvailablePatchSummary'}, + 'last_patch_installation_summary': {'key': 'lastPatchInstallationSummary', 'type': 'LastPatchInstallationSummary'}, + } + + def __init__(self, **kwargs): + super(VirtualMachinePatchStatus, self).__init__(**kwargs) + self.available_patch_summary = kwargs.get('available_patch_summary', None) + self.last_patch_installation_summary = kwargs.get('last_patch_installation_summary', None) class VirtualMachineReimageParameters(Model): @@ -5057,6 +5279,10 @@ class VirtualMachineScaleSetExtension(SubResourceReadOnly): deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. :type auto_upgrade_minor_version: bool + :param enable_automatic_upgrade: Indicates whether the extension should be + automatically upgraded by the platform if there is a newer version of the + extension available. + :type enable_automatic_upgrade: bool :param settings: Json formatted public settings for the extension. 
:type settings: object :param protected_settings: The extension can contain either @@ -5086,6 +5312,7 @@ class VirtualMachineScaleSetExtension(SubResourceReadOnly): 'type1': {'key': 'properties.type', 'type': 'str'}, 'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'}, 'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'}, + 'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'}, 'settings': {'key': 'properties.settings', 'type': 'object'}, 'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, @@ -5101,6 +5328,7 @@ def __init__(self, **kwargs): self.type1 = kwargs.get('type1', None) self.type_handler_version = kwargs.get('type_handler_version', None) self.auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', None) + self.enable_automatic_upgrade = kwargs.get('enable_automatic_upgrade', None) self.settings = kwargs.get('settings', None) self.protected_settings = kwargs.get('protected_settings', None) self.provisioning_state = None @@ -5161,6 +5389,10 @@ class VirtualMachineScaleSetExtensionUpdate(SubResourceReadOnly): deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. :type auto_upgrade_minor_version: bool + :param enable_automatic_upgrade: Indicates whether the extension should be + automatically upgraded by the platform if there is a newer version of the + extension available. + :type enable_automatic_upgrade: bool :param settings: Json formatted public settings for the extension. :type settings: object :param protected_settings: The extension can contain either @@ -5191,6 +5423,7 @@ class VirtualMachineScaleSetExtensionUpdate(SubResourceReadOnly): 'type1': {'key': 'properties.type', 'type': 'str'}, 'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'}, 'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'}, + 'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'}, 'settings': {'key': 'properties.settings', 'type': 'object'}, 'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, @@ -5206,6 +5439,7 @@ def __init__(self, **kwargs): self.type1 = kwargs.get('type1', None) self.type_handler_version = kwargs.get('type_handler_version', None) self.auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', None) + self.enable_automatic_upgrade = kwargs.get('enable_automatic_upgrade', None) self.settings = kwargs.get('settings', None) self.protected_settings = kwargs.get('protected_settings', None) self.provisioning_state = None diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_models_py3.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_models_py3.py index 4516e6d813e6..cfad7bff11f0 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_models_py3.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/models/_models_py3.py @@ -419,6 +419,77 @@ def __init__(self, *, tags=None, platform_update_domain_count: int=None, platfor self.sku = sku +class AvailablePatchSummary(Model): + """Describes the properties of an virtual machine instance view for available + patch summary. 
+ + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar status: The overall success or failure status of the operation. It + remains "InProgress" until the operation completes. At that point it will + become "Failed", "Succeeded", or "CompletedWithWarnings.". Possible values + include: 'InProgress', 'Failed', 'Succeeded', 'CompletedWithWarnings' + :vartype status: str or + ~azure.mgmt.compute.v2020_06_01.models.PatchOperationStatus + :ivar assessment_activity_id: The activity ID of the operation that + produced this result. It is used to correlate across CRP and extension + logs. + :vartype assessment_activity_id: str + :ivar reboot_pending: The overall reboot status of the VM. It will be true + when partially installed patches require a reboot to complete installation + but the reboot has not yet occurred. + :vartype reboot_pending: bool + :ivar critical_and_security_patch_count: The number of critical or + security patches that have been detected as available and not yet + installed. + :vartype critical_and_security_patch_count: int + :ivar other_patch_count: The number of all available patches excluding + critical and security. + :vartype other_patch_count: int + :ivar start_time: The UTC timestamp when the operation began. + :vartype start_time: datetime + :ivar last_modified_time: The UTC timestamp when the operation began. + :vartype last_modified_time: datetime + :ivar error: The errors that were encountered during execution of the + operation. The details array contains the list of them. + :vartype error: ~azure.mgmt.compute.v2020_06_01.models.ApiError + """ + + _validation = { + 'status': {'readonly': True}, + 'assessment_activity_id': {'readonly': True}, + 'reboot_pending': {'readonly': True}, + 'critical_and_security_patch_count': {'readonly': True}, + 'other_patch_count': {'readonly': True}, + 'start_time': {'readonly': True}, + 'last_modified_time': {'readonly': True}, + 'error': {'readonly': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'assessment_activity_id': {'key': 'assessmentActivityId', 'type': 'str'}, + 'reboot_pending': {'key': 'rebootPending', 'type': 'bool'}, + 'critical_and_security_patch_count': {'key': 'criticalAndSecurityPatchCount', 'type': 'int'}, + 'other_patch_count': {'key': 'otherPatchCount', 'type': 'int'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_modified_time': {'key': 'lastModifiedTime', 'type': 'iso-8601'}, + 'error': {'key': 'error', 'type': 'ApiError'}, + } + + def __init__(self, **kwargs) -> None: + super(AvailablePatchSummary, self).__init__(**kwargs) + self.status = None + self.assessment_activity_id = None + self.reboot_pending = None + self.critical_and_security_patch_count = None + self.other_patch_count = None + self.start_time = None + self.last_modified_time = None + self.error = None + + class BillingProfile(Model): """Specifies the billing related details of a Azure Spot VM or VMSS.

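The enable_automatic_upgrade flag added to the extension models above serializes to properties.enableAutomaticUpgrade. A sketch of passing it when deploying a VM extension; the client construction, resource names, and the publisher/type/version shown are assumptions for illustration, not part of this change:

# Sketch only: opts a VM extension into platform-driven automatic upgrades.
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.compute.v2020_06_01.models import VirtualMachineExtension

credentials = ServicePrincipalCredentials(
    client_id="<app-id>", secret="<app-secret>", tenant="<tenant-id>")
client = ComputeManagementClient(credentials, "<subscription-id>")

extension = VirtualMachineExtension(
    location="eastus",
    publisher="Microsoft.Azure.Extensions",        # placeholder extension
    virtual_machine_extension_type="CustomScript",
    type_handler_version="2.1",
    auto_upgrade_minor_version=True,
    enable_automatic_upgrade=True,                  # property added in this change
    settings={"commandToExecute": "echo hello"},
)
poller = client.virtual_machine_extensions.create_or_update(
    "<resource-group>", "<vm-name>", "customScript", extension)
poller.result()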
Minimum api-version: 2019-03-01. @@ -1875,6 +1946,110 @@ def __init__(self, *, secret_url: str, source_vault, **kwargs) -> None: self.source_vault = source_vault +class LastPatchInstallationSummary(Model): + """Describes the properties of the last installed patch summary. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar status: The overall success or failure status of the operation. It + remains "InProgress" until the operation completes. At that point it will + become "Failed", "Succeeded", or "CompletedWithWarnings.". Possible values + include: 'InProgress', 'Failed', 'Succeeded', 'CompletedWithWarnings' + :vartype status: str or + ~azure.mgmt.compute.v2020_06_01.models.PatchOperationStatus + :ivar installation_activity_id: The activity ID of the operation that + produced this result. It is used to correlate across CRP and extension + logs. + :vartype installation_activity_id: str + :ivar maintenance_window_exceeded: Describes whether the operation ran out + of time before it completed all its intended actions + :vartype maintenance_window_exceeded: bool + :ivar reboot_status: The reboot status of the machine after the patch + operation. It will be in "NotNeeded" status if reboot is not needed after + the patch operation. "Required" will be the status once the patch is + applied and machine is required to reboot. "Started" will be the reboot + status when the machine has started to reboot. "Failed" will be the status + if the machine is failed to reboot. "Completed" will be the status once + the machine is rebooted successfully. Possible values include: + 'NotNeeded', 'Required', 'Started', 'Failed', 'Completed' + :vartype reboot_status: str or + ~azure.mgmt.compute.v2020_06_01.models.RebootStatus + :ivar not_selected_patch_count: The number of all available patches but + not going to be installed because it didn't match a classification or + inclusion list entry. + :vartype not_selected_patch_count: int + :ivar excluded_patch_count: The number of all available patches but + excluded explicitly by a customer-specified exclusion list match. + :vartype excluded_patch_count: int + :ivar pending_patch_count: The number of all available patches expected to + be installed over the course of the patch installation operation. + :vartype pending_patch_count: int + :ivar installed_patch_count: The count of patches that successfully + installed. + :vartype installed_patch_count: int + :ivar failed_patch_count: The count of patches that failed installation. + :vartype failed_patch_count: int + :ivar start_time: The UTC timestamp when the operation began. + :vartype start_time: datetime + :ivar last_modified_time: The UTC timestamp when the operation began. + :vartype last_modified_time: datetime + :ivar started_by: The person or system account that started the operation + :vartype started_by: str + :ivar error: The errors that were encountered during execution of the + operation. The details array contains the list of them. 
+ :vartype error: ~azure.mgmt.compute.v2020_06_01.models.ApiError + """ + + _validation = { + 'status': {'readonly': True}, + 'installation_activity_id': {'readonly': True}, + 'maintenance_window_exceeded': {'readonly': True}, + 'reboot_status': {'readonly': True}, + 'not_selected_patch_count': {'readonly': True}, + 'excluded_patch_count': {'readonly': True}, + 'pending_patch_count': {'readonly': True}, + 'installed_patch_count': {'readonly': True}, + 'failed_patch_count': {'readonly': True}, + 'start_time': {'readonly': True}, + 'last_modified_time': {'readonly': True}, + 'started_by': {'readonly': True}, + 'error': {'readonly': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'installation_activity_id': {'key': 'installationActivityId', 'type': 'str'}, + 'maintenance_window_exceeded': {'key': 'maintenanceWindowExceeded', 'type': 'bool'}, + 'reboot_status': {'key': 'rebootStatus', 'type': 'str'}, + 'not_selected_patch_count': {'key': 'notSelectedPatchCount', 'type': 'int'}, + 'excluded_patch_count': {'key': 'excludedPatchCount', 'type': 'int'}, + 'pending_patch_count': {'key': 'pendingPatchCount', 'type': 'int'}, + 'installed_patch_count': {'key': 'installedPatchCount', 'type': 'int'}, + 'failed_patch_count': {'key': 'failedPatchCount', 'type': 'int'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_modified_time': {'key': 'lastModifiedTime', 'type': 'iso-8601'}, + 'started_by': {'key': 'startedBy', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'ApiError'}, + } + + def __init__(self, **kwargs) -> None: + super(LastPatchInstallationSummary, self).__init__(**kwargs) + self.status = None + self.installation_activity_id = None + self.maintenance_window_exceeded = None + self.reboot_status = None + self.not_selected_patch_count = None + self.excluded_patch_count = None + self.pending_patch_count = None + self.installed_patch_count = None + self.failed_patch_count = None + self.start_time = None + self.last_modified_time = None + self.started_by = None + self.error = None + + class LinuxConfiguration(Model): """Specifies the Linux operating system settings on the virtual machine.

For a list of supported Linux distributions, see [Linux on
@@ -2419,7 +2594,7 @@ class PatchSettings(Model):
     automatically be updated by the OS. The property
     WindowsConfiguration.enableAutomaticUpdates must be true.

** AutomaticByPlatform** - the virtual machine will automatically updated by - the OS. The properties provisionVMAgent and + the platform. The properties provisionVMAgent and WindowsConfiguration.enableAutomaticUpdates must be true. Possible values include: 'Manual', 'AutomaticByOS', 'AutomaticByPlatform' :type patch_mode: str or @@ -4254,6 +4429,10 @@ class VirtualMachineExtension(Resource): deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. :type auto_upgrade_minor_version: bool + :param enable_automatic_upgrade: Indicates whether the extension should be + automatically upgraded by the platform if there is a newer version of the + extension available. + :type enable_automatic_upgrade: bool :param settings: Json formatted public settings for the extension. :type settings: object :param protected_settings: The extension can contain either @@ -4287,19 +4466,21 @@ class VirtualMachineExtension(Resource): 'virtual_machine_extension_type': {'key': 'properties.type', 'type': 'str'}, 'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'}, 'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'}, + 'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'}, 'settings': {'key': 'properties.settings', 'type': 'object'}, 'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineExtensionInstanceView'}, } - def __init__(self, *, location: str, tags=None, force_update_tag: str=None, publisher: str=None, virtual_machine_extension_type: str=None, type_handler_version: str=None, auto_upgrade_minor_version: bool=None, settings=None, protected_settings=None, instance_view=None, **kwargs) -> None: + def __init__(self, *, location: str, tags=None, force_update_tag: str=None, publisher: str=None, virtual_machine_extension_type: str=None, type_handler_version: str=None, auto_upgrade_minor_version: bool=None, enable_automatic_upgrade: bool=None, settings=None, protected_settings=None, instance_view=None, **kwargs) -> None: super(VirtualMachineExtension, self).__init__(location=location, tags=tags, **kwargs) self.force_update_tag = force_update_tag self.publisher = publisher self.virtual_machine_extension_type = virtual_machine_extension_type self.type_handler_version = type_handler_version self.auto_upgrade_minor_version = auto_upgrade_minor_version + self.enable_automatic_upgrade = enable_automatic_upgrade self.settings = settings self.protected_settings = protected_settings self.provisioning_state = None @@ -4472,6 +4653,10 @@ class VirtualMachineExtensionUpdate(UpdateResource): deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. :type auto_upgrade_minor_version: bool + :param enable_automatic_upgrade: Indicates whether the extension should be + automatically upgraded by the platform if there is a newer version of the + extension available. + :type enable_automatic_upgrade: bool :param settings: Json formatted public settings for the extension. 
:type settings: object :param protected_settings: The extension can contain either @@ -4487,17 +4672,19 @@ class VirtualMachineExtensionUpdate(UpdateResource): 'type': {'key': 'properties.type', 'type': 'str'}, 'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'}, 'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'}, + 'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'}, 'settings': {'key': 'properties.settings', 'type': 'object'}, 'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'}, } - def __init__(self, *, tags=None, force_update_tag: str=None, publisher: str=None, type: str=None, type_handler_version: str=None, auto_upgrade_minor_version: bool=None, settings=None, protected_settings=None, **kwargs) -> None: + def __init__(self, *, tags=None, force_update_tag: str=None, publisher: str=None, type: str=None, type_handler_version: str=None, auto_upgrade_minor_version: bool=None, enable_automatic_upgrade: bool=None, settings=None, protected_settings=None, **kwargs) -> None: super(VirtualMachineExtensionUpdate, self).__init__(tags=tags, **kwargs) self.force_update_tag = force_update_tag self.publisher = publisher self.type = type self.type_handler_version = type_handler_version self.auto_upgrade_minor_version = auto_upgrade_minor_version + self.enable_automatic_upgrade = enable_automatic_upgrade self.settings = settings self.protected_settings = protected_settings @@ -4737,6 +4924,9 @@ class VirtualMachineInstanceView(Model): :param extensions: The extensions information. :type extensions: list[~azure.mgmt.compute.v2020_06_01.models.VirtualMachineExtensionInstanceView] + :ivar vm_health: The health status for the VM. + :vartype vm_health: + ~azure.mgmt.compute.v2020_06_01.models.VirtualMachineHealthStatus :param boot_diagnostics: Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

You can easily view the output of your console log.

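The instance-view additions above (vm_health, patch_status, and the two read-only summary models) are populated only by the service. A hedged sketch of reading them; it assumes a ComputeManagementClient built as in the earlier sketches and a VM on which a patch assessment has already run:

# Sketch only: inspects the new read-only health/patch fields on the VM
# instance view. 'client' is assumed to be a ComputeManagementClient as above.
vm = client.virtual_machines.get(
    "<resource-group>", "<vm-name>", expand="instanceView")
view = vm.instance_view

if view.vm_health is not None and view.vm_health.status is not None:
    print("VM health:", view.vm_health.status.code)

patch_status = view.patch_status
if patch_status is not None and patch_status.available_patch_summary is not None:
    summary = patch_status.available_patch_summary
    print("Assessment:", summary.status,
          "critical/security:", summary.critical_and_security_patch_count,
          "other:", summary.other_patch_count)
if patch_status is not None and patch_status.last_patch_installation_summary is not None:
    last = patch_status.last_patch_installation_summary
    print("Last install:", last.status, "reboot status:", last.reboot_status)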
@@ -4751,9 +4941,13 @@ class VirtualMachineInstanceView(Model): :param statuses: The resource status information. :type statuses: list[~azure.mgmt.compute.v2020_06_01.models.InstanceViewStatus] + :param patch_status: The status of virtual machine patch operations. + :type patch_status: + ~azure.mgmt.compute.v2020_06_01.models.VirtualMachinePatchStatus """ _validation = { + 'vm_health': {'readonly': True}, 'assigned_host': {'readonly': True}, } @@ -4769,12 +4963,14 @@ class VirtualMachineInstanceView(Model): 'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'}, 'disks': {'key': 'disks', 'type': '[DiskInstanceView]'}, 'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'}, + 'vm_health': {'key': 'vmHealth', 'type': 'VirtualMachineHealthStatus'}, 'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'}, 'assigned_host': {'key': 'assignedHost', 'type': 'str'}, 'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'}, + 'patch_status': {'key': 'patchStatus', 'type': 'VirtualMachinePatchStatus'}, } - def __init__(self, *, platform_update_domain: int=None, platform_fault_domain: int=None, computer_name: str=None, os_name: str=None, os_version: str=None, hyper_vgeneration=None, rdp_thumb_print: str=None, vm_agent=None, maintenance_redeploy_status=None, disks=None, extensions=None, boot_diagnostics=None, statuses=None, **kwargs) -> None: + def __init__(self, *, platform_update_domain: int=None, platform_fault_domain: int=None, computer_name: str=None, os_name: str=None, os_version: str=None, hyper_vgeneration=None, rdp_thumb_print: str=None, vm_agent=None, maintenance_redeploy_status=None, disks=None, extensions=None, boot_diagnostics=None, statuses=None, patch_status=None, **kwargs) -> None: super(VirtualMachineInstanceView, self).__init__(**kwargs) self.platform_update_domain = platform_update_domain self.platform_fault_domain = platform_fault_domain @@ -4787,9 +4983,35 @@ def __init__(self, *, platform_update_domain: int=None, platform_fault_domain: i self.maintenance_redeploy_status = maintenance_redeploy_status self.disks = disks self.extensions = extensions + self.vm_health = None self.boot_diagnostics = boot_diagnostics self.assigned_host = None self.statuses = statuses + self.patch_status = patch_status + + +class VirtualMachinePatchStatus(Model): + """The status of virtual machine patch operations. + + :param available_patch_summary: The available patch summary of the latest + assessment operation for the virtual machine. + :type available_patch_summary: + ~azure.mgmt.compute.v2020_06_01.models.AvailablePatchSummary + :param last_patch_installation_summary: The installation summary of the + latest installation operation for the virtual machine. 
+ :type last_patch_installation_summary: + ~azure.mgmt.compute.v2020_06_01.models.LastPatchInstallationSummary + """ + + _attribute_map = { + 'available_patch_summary': {'key': 'availablePatchSummary', 'type': 'AvailablePatchSummary'}, + 'last_patch_installation_summary': {'key': 'lastPatchInstallationSummary', 'type': 'LastPatchInstallationSummary'}, + } + + def __init__(self, *, available_patch_summary=None, last_patch_installation_summary=None, **kwargs) -> None: + super(VirtualMachinePatchStatus, self).__init__(**kwargs) + self.available_patch_summary = available_patch_summary + self.last_patch_installation_summary = last_patch_installation_summary class VirtualMachineReimageParameters(Model): @@ -5057,6 +5279,10 @@ class VirtualMachineScaleSetExtension(SubResourceReadOnly): deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. :type auto_upgrade_minor_version: bool + :param enable_automatic_upgrade: Indicates whether the extension should be + automatically upgraded by the platform if there is a newer version of the + extension available. + :type enable_automatic_upgrade: bool :param settings: Json formatted public settings for the extension. :type settings: object :param protected_settings: The extension can contain either @@ -5086,13 +5312,14 @@ class VirtualMachineScaleSetExtension(SubResourceReadOnly): 'type1': {'key': 'properties.type', 'type': 'str'}, 'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'}, 'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'}, + 'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'}, 'settings': {'key': 'properties.settings', 'type': 'object'}, 'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'provision_after_extensions': {'key': 'properties.provisionAfterExtensions', 'type': '[str]'}, } - def __init__(self, *, name: str=None, force_update_tag: str=None, publisher: str=None, type1: str=None, type_handler_version: str=None, auto_upgrade_minor_version: bool=None, settings=None, protected_settings=None, provision_after_extensions=None, **kwargs) -> None: + def __init__(self, *, name: str=None, force_update_tag: str=None, publisher: str=None, type1: str=None, type_handler_version: str=None, auto_upgrade_minor_version: bool=None, enable_automatic_upgrade: bool=None, settings=None, protected_settings=None, provision_after_extensions=None, **kwargs) -> None: super(VirtualMachineScaleSetExtension, self).__init__(**kwargs) self.name = name self.type = None @@ -5101,6 +5328,7 @@ def __init__(self, *, name: str=None, force_update_tag: str=None, publisher: str self.type1 = type1 self.type_handler_version = type_handler_version self.auto_upgrade_minor_version = auto_upgrade_minor_version + self.enable_automatic_upgrade = enable_automatic_upgrade self.settings = settings self.protected_settings = protected_settings self.provisioning_state = None @@ -5161,6 +5389,10 @@ class VirtualMachineScaleSetExtensionUpdate(SubResourceReadOnly): deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. :type auto_upgrade_minor_version: bool + :param enable_automatic_upgrade: Indicates whether the extension should be + automatically upgraded by the platform if there is a newer version of the + extension available. 
+ :type enable_automatic_upgrade: bool :param settings: Json formatted public settings for the extension. :type settings: object :param protected_settings: The extension can contain either @@ -5191,13 +5423,14 @@ class VirtualMachineScaleSetExtensionUpdate(SubResourceReadOnly): 'type1': {'key': 'properties.type', 'type': 'str'}, 'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'}, 'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'}, + 'enable_automatic_upgrade': {'key': 'properties.enableAutomaticUpgrade', 'type': 'bool'}, 'settings': {'key': 'properties.settings', 'type': 'object'}, 'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'provision_after_extensions': {'key': 'properties.provisionAfterExtensions', 'type': '[str]'}, } - def __init__(self, *, force_update_tag: str=None, publisher: str=None, type1: str=None, type_handler_version: str=None, auto_upgrade_minor_version: bool=None, settings=None, protected_settings=None, provision_after_extensions=None, **kwargs) -> None: + def __init__(self, *, force_update_tag: str=None, publisher: str=None, type1: str=None, type_handler_version: str=None, auto_upgrade_minor_version: bool=None, enable_automatic_upgrade: bool=None, settings=None, protected_settings=None, provision_after_extensions=None, **kwargs) -> None: super(VirtualMachineScaleSetExtensionUpdate, self).__init__(**kwargs) self.name = None self.type = None @@ -5206,6 +5439,7 @@ def __init__(self, *, force_update_tag: str=None, publisher: str=None, type1: st self.type1 = type1 self.type_handler_version = type_handler_version self.auto_upgrade_minor_version = auto_upgrade_minor_version + self.enable_automatic_upgrade = enable_automatic_upgrade self.settings = settings self.protected_settings = protected_settings self.provisioning_state = None diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_virtual_machine_scale_set_vms_operations.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_virtual_machine_scale_set_vms_operations.py index 29b7cf089ca1..4f51e73d57dd 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_virtual_machine_scale_set_vms_operations.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_virtual_machine_scale_set_vms_operations.py @@ -1307,8 +1307,7 @@ def _simulate_eviction_initial( def simulate_eviction( self, resource_group_name, vm_scale_set_name, instance_id, custom_headers=None, raw=False, polling=True, **operation_config): """The operation to simulate the eviction of spot virtual machine in a VM - scale set. The eviction will occur within 30 minutes of calling the - API. + scale set. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_virtual_machines_operations.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_virtual_machines_operations.py index 3ba00c70c08d..9950fd978ffe 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_virtual_machines_operations.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_virtual_machines_operations.py @@ -1796,8 +1796,7 @@ def _simulate_eviction_initial( def simulate_eviction( self, resource_group_name, vm_name, custom_headers=None, raw=False, polling=True, **operation_config): - """The operation to simulate the eviction of spot virtual machine. The - eviction will occur within 30 minutes of calling the API. + """The operation to simulate the eviction of spot virtual machine. :param resource_group_name: The name of the resource group. :type resource_group_name: str diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/__init__.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/__init__.py new file mode 100644 index 000000000000..a7bc1ce68e37 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/__init__.py @@ -0,0 +1,19 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from ._configuration import ComputeManagementClientConfiguration +from ._compute_management_client import ComputeManagementClient +__all__ = ['ComputeManagementClient', 'ComputeManagementClientConfiguration'] + +from .version import VERSION + +__version__ = VERSION + diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/_compute_management_client.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/_compute_management_client.py new file mode 100644 index 000000000000..dfd5881f7868 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/_compute_management_client.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.service_client import SDKClient +from msrest import Serializer, Deserializer + +from ._configuration import ComputeManagementClientConfiguration +from .operations import DisksOperations +from .operations import SnapshotsOperations +from .operations import DiskEncryptionSetsOperations +from .operations import DiskAccessesOperations +from . import models + + +class ComputeManagementClient(SDKClient): + """Compute Client + + :ivar config: Configuration for client. 
+ :vartype config: ComputeManagementClientConfiguration + + :ivar disks: Disks operations + :vartype disks: azure.mgmt.compute.v2020_06_30.operations.DisksOperations + :ivar snapshots: Snapshots operations + :vartype snapshots: azure.mgmt.compute.v2020_06_30.operations.SnapshotsOperations + :ivar disk_encryption_sets: DiskEncryptionSets operations + :vartype disk_encryption_sets: azure.mgmt.compute.v2020_06_30.operations.DiskEncryptionSetsOperations + :ivar disk_accesses: DiskAccesses operations + :vartype disk_accesses: azure.mgmt.compute.v2020_06_30.operations.DiskAccessesOperations + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param subscription_id: Subscription credentials which uniquely identify + Microsoft Azure subscription. The subscription ID forms part of the URI + for every service call. + :type subscription_id: str + :param str base_url: Service URL + """ + + def __init__( + self, credentials, subscription_id, base_url=None): + + self.config = ComputeManagementClientConfiguration(credentials, subscription_id, base_url) + super(ComputeManagementClient, self).__init__(self.config.credentials, self.config) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '2020-06-30' + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.disks = DisksOperations( + self._client, self.config, self._serialize, self._deserialize) + self.snapshots = SnapshotsOperations( + self._client, self.config, self._serialize, self._deserialize) + self.disk_encryption_sets = DiskEncryptionSetsOperations( + self._client, self.config, self._serialize, self._deserialize) + self.disk_accesses = DiskAccessesOperations( + self._client, self.config, self._serialize, self._deserialize) diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/_configuration.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/_configuration.py new file mode 100644 index 000000000000..0fcf5984e2ca --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/_configuration.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +from msrestazure import AzureConfiguration + +from .version import VERSION + + +class ComputeManagementClientConfiguration(AzureConfiguration): + """Configuration for ComputeManagementClient + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param subscription_id: Subscription credentials which uniquely identify + Microsoft Azure subscription. The subscription ID forms part of the URI + for every service call. 
+ :type subscription_id: str + :param str base_url: Service URL + """ + + def __init__( + self, credentials, subscription_id, base_url=None): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if not base_url: + base_url = 'https://management.azure.com' + + super(ComputeManagementClientConfiguration, self).__init__(base_url) + + # Starting Autorest.Python 4.0.64, make connection pool activated by default + self.keep_alive = True + + self.add_user_agent('azure-mgmt-compute/{}'.format(VERSION)) + self.add_user_agent('Azure-SDK-For-Python') + + self.credentials = credentials + self.subscription_id = subscription_id diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/__init__.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/__init__.py new file mode 100644 index 000000000000..1061aaa30ef7 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/__init__.py @@ -0,0 +1,147 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AccessUri + from ._models_py3 import ApiError + from ._models_py3 import ApiErrorBase + from ._models_py3 import CreationData + from ._models_py3 import Disk + from ._models_py3 import DiskAccess + from ._models_py3 import DiskAccessUpdate + from ._models_py3 import DiskEncryptionSet + from ._models_py3 import DiskEncryptionSetUpdate + from ._models_py3 import DiskSku + from ._models_py3 import DiskUpdate + from ._models_py3 import Encryption + from ._models_py3 import EncryptionSetIdentity + from ._models_py3 import EncryptionSettingsCollection + from ._models_py3 import EncryptionSettingsElement + from ._models_py3 import GrantAccessData + from ._models_py3 import ImageDiskReference + from ._models_py3 import InnerError + from ._models_py3 import KeyVaultAndKeyReference + from ._models_py3 import KeyVaultAndSecretReference + from ._models_py3 import PrivateEndpoint + from ._models_py3 import PrivateEndpointConnection + from ._models_py3 import PrivateLinkResource + from ._models_py3 import PrivateLinkResourceListResult + from ._models_py3 import PrivateLinkServiceConnectionState + from ._models_py3 import Resource + from ._models_py3 import ShareInfoElement + from ._models_py3 import Snapshot + from ._models_py3 import SnapshotSku + from ._models_py3 import SnapshotUpdate + from ._models_py3 import SourceVault +except (SyntaxError, ImportError): + from ._models import AccessUri + from ._models import ApiError + from ._models import ApiErrorBase + from ._models import CreationData + from ._models import Disk + from ._models import DiskAccess + from ._models import DiskAccessUpdate + from ._models import DiskEncryptionSet + from ._models import DiskEncryptionSetUpdate + from ._models import DiskSku + from ._models import DiskUpdate + from ._models import Encryption + from ._models import EncryptionSetIdentity + from ._models import EncryptionSettingsCollection + from ._models import 
EncryptionSettingsElement + from ._models import GrantAccessData + from ._models import ImageDiskReference + from ._models import InnerError + from ._models import KeyVaultAndKeyReference + from ._models import KeyVaultAndSecretReference + from ._models import PrivateEndpoint + from ._models import PrivateEndpointConnection + from ._models import PrivateLinkResource + from ._models import PrivateLinkResourceListResult + from ._models import PrivateLinkServiceConnectionState + from ._models import Resource + from ._models import ShareInfoElement + from ._models import Snapshot + from ._models import SnapshotSku + from ._models import SnapshotUpdate + from ._models import SourceVault +from ._paged_models import DiskAccessPaged +from ._paged_models import DiskEncryptionSetPaged +from ._paged_models import DiskPaged +from ._paged_models import SnapshotPaged +from ._paged_models import StrPaged +from ._compute_management_client_enums import ( + DiskStorageAccountTypes, + OperatingSystemTypes, + HyperVGeneration, + DiskCreateOption, + DiskState, + EncryptionType, + NetworkAccessPolicy, + SnapshotStorageAccountTypes, + DiskEncryptionSetType, + AccessLevel, + DiskEncryptionSetIdentityType, + PrivateEndpointServiceConnectionStatus, + PrivateEndpointConnectionProvisioningState, +) + +__all__ = [ + 'AccessUri', + 'ApiError', + 'ApiErrorBase', + 'CreationData', + 'Disk', + 'DiskAccess', + 'DiskAccessUpdate', + 'DiskEncryptionSet', + 'DiskEncryptionSetUpdate', + 'DiskSku', + 'DiskUpdate', + 'Encryption', + 'EncryptionSetIdentity', + 'EncryptionSettingsCollection', + 'EncryptionSettingsElement', + 'GrantAccessData', + 'ImageDiskReference', + 'InnerError', + 'KeyVaultAndKeyReference', + 'KeyVaultAndSecretReference', + 'PrivateEndpoint', + 'PrivateEndpointConnection', + 'PrivateLinkResource', + 'PrivateLinkResourceListResult', + 'PrivateLinkServiceConnectionState', + 'Resource', + 'ShareInfoElement', + 'Snapshot', + 'SnapshotSku', + 'SnapshotUpdate', + 'SourceVault', + 'DiskPaged', + 'SnapshotPaged', + 'DiskEncryptionSetPaged', + 'StrPaged', + 'DiskAccessPaged', + 'DiskStorageAccountTypes', + 'OperatingSystemTypes', + 'HyperVGeneration', + 'DiskCreateOption', + 'DiskState', + 'EncryptionType', + 'NetworkAccessPolicy', + 'SnapshotStorageAccountTypes', + 'DiskEncryptionSetType', + 'AccessLevel', + 'DiskEncryptionSetIdentityType', + 'PrivateEndpointServiceConnectionStatus', + 'PrivateEndpointConnectionProvisioningState', +] diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_compute_management_client_enums.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_compute_management_client_enums.py new file mode 100644 index 000000000000..516825ceed2a --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_compute_management_client_enums.py @@ -0,0 +1,107 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum + + +class DiskStorageAccountTypes(str, Enum): + + standard_lrs = "Standard_LRS" #: Standard HDD locally redundant storage. 
Best for backup, non-critical, and infrequent access. + premium_lrs = "Premium_LRS" #: Premium SSD locally redundant storage. Best for production and performance sensitive workloads. + standard_ssd_lrs = "StandardSSD_LRS" #: Standard SSD locally redundant storage. Best for web servers, lightly used enterprise applications and dev/test. + ultra_ssd_lrs = "UltraSSD_LRS" #: Ultra SSD locally redundant storage. Best for IO-intensive workloads such as SAP HANA, top tier databases (for example, SQL, Oracle), and other transaction-heavy workloads. + + +class OperatingSystemTypes(str, Enum): + + windows = "Windows" + linux = "Linux" + + +class HyperVGeneration(str, Enum): + + v1 = "V1" + v2 = "V2" + + +class DiskCreateOption(str, Enum): + + empty = "Empty" #: Create an empty data disk of a size given by diskSizeGB. + attach = "Attach" #: Disk will be attached to a VM. + from_image = "FromImage" #: Create a new disk from a platform image specified by the given imageReference or galleryImageReference. + import_enum = "Import" #: Create a disk by importing from a blob specified by a sourceUri in a storage account specified by storageAccountId. + copy = "Copy" #: Create a new disk or snapshot by copying from a disk or snapshot specified by the given sourceResourceId. + restore = "Restore" #: Create a new disk by copying from a backup recovery point. + upload = "Upload" #: Create a new disk by obtaining a write token and using it to directly upload the contents of the disk. + + +class DiskState(str, Enum): + + unattached = "Unattached" #: The disk is not being used and can be attached to a VM. + attached = "Attached" #: The disk is currently mounted to a running VM. + reserved = "Reserved" #: The disk is mounted to a stopped-deallocated VM + active_sas = "ActiveSAS" #: The disk currently has an Active SAS Uri associated with it. + ready_to_upload = "ReadyToUpload" #: A disk is ready to be created by upload by requesting a write token. + active_upload = "ActiveUpload" #: A disk is created for upload and a write token has been issued for uploading to it. + + +class EncryptionType(str, Enum): + + encryption_at_rest_with_platform_key = "EncryptionAtRestWithPlatformKey" #: Disk is encrypted at rest with Platform managed key. It is the default encryption type. This is not a valid encryption type for disk encryption sets. + encryption_at_rest_with_customer_key = "EncryptionAtRestWithCustomerKey" #: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. + encryption_at_rest_with_platform_and_customer_keys = "EncryptionAtRestWithPlatformAndCustomerKeys" #: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed. + + +class NetworkAccessPolicy(str, Enum): + + allow_all = "AllowAll" #: The disk can be exported or uploaded to from any network. + allow_private = "AllowPrivate" #: The disk can be exported or uploaded to using a DiskAccess resource's private endpoints. + deny_all = "DenyAll" #: The disk cannot be exported. 
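The enums above are consumed through the v2020_06_30 models when composing disk requests. A minimal usage sketch for creating an empty managed disk with the new api-version follows; it assumes the generated DisksOperations exposes the usual create_or_update long-running operation (its module is not shown in this excerpt), and the credential object, resource group, and disk name are placeholders.

    from azure.mgmt.compute.v2020_06_30 import ComputeManagementClient
    from azure.mgmt.compute.v2020_06_30.models import (
        CreationData, Disk, DiskCreateOption, DiskSku,
        DiskStorageAccountTypes, NetworkAccessPolicy)

    # credentials: any msrestazure credential object; subscription_id: str (both placeholders)
    client = ComputeManagementClient(credentials, subscription_id)

    empty_disk = Disk(
        location='westus2',
        sku=DiskSku(name=DiskStorageAccountTypes.premium_lrs),
        creation_data=CreationData(create_option=DiskCreateOption.empty),
        disk_size_gb=128,
        # DenyAll: the disk cannot be exported (see NetworkAccessPolicy above)
        network_access_policy=NetworkAccessPolicy.deny_all)

    # Assumed long-running operation; result() returns the created Disk
    poller = client.disks.create_or_update('my-resource-group', 'my-disk', empty_disk)
    disk = poller.result()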
+ + +class SnapshotStorageAccountTypes(str, Enum): + + standard_lrs = "Standard_LRS" #: Standard HDD locally redundant storage + premium_lrs = "Premium_LRS" #: Premium SSD locally redundant storage + standard_zrs = "Standard_ZRS" #: Standard zone redundant storage + + +class DiskEncryptionSetType(str, Enum): + + encryption_at_rest_with_customer_key = "EncryptionAtRestWithCustomerKey" #: Resource using diskEncryptionSet would be encrypted at rest with Customer managed key that can be changed and revoked by a customer. + encryption_at_rest_with_platform_and_customer_keys = "EncryptionAtRestWithPlatformAndCustomerKeys" #: Resource using diskEncryptionSet would be encrypted at rest with two layers of encryption. One of the keys is Customer managed and the other key is Platform managed. + + +class AccessLevel(str, Enum): + + none = "None" + read = "Read" + write = "Write" + + +class DiskEncryptionSetIdentityType(str, Enum): + + system_assigned = "SystemAssigned" + + +class PrivateEndpointServiceConnectionStatus(str, Enum): + + pending = "Pending" + approved = "Approved" + rejected = "Rejected" + + +class PrivateEndpointConnectionProvisioningState(str, Enum): + + succeeded = "Succeeded" + creating = "Creating" + deleting = "Deleting" + failed = "Failed" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_models.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_models.py new file mode 100644 index 000000000000..9d82fc111254 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_models.py @@ -0,0 +1,1403 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class AccessUri(Model): + """A disk access SAS uri. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar access_sas: A SAS uri for accessing a disk. + :vartype access_sas: str + """ + + _validation = { + 'access_sas': {'readonly': True}, + } + + _attribute_map = { + 'access_sas': {'key': 'accessSAS', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AccessUri, self).__init__(**kwargs) + self.access_sas = None + + +class ApiError(Model): + """Api error. + + :param details: The Api error details + :type details: list[~azure.mgmt.compute.v2020_06_30.models.ApiErrorBase] + :param innererror: The Api inner error + :type innererror: ~azure.mgmt.compute.v2020_06_30.models.InnerError + :param code: The error code. + :type code: str + :param target: The target of the particular error. + :type target: str + :param message: The error message. 
+ :type message: str + """ + + _attribute_map = { + 'details': {'key': 'details', 'type': '[ApiErrorBase]'}, + 'innererror': {'key': 'innererror', 'type': 'InnerError'}, + 'code': {'key': 'code', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ApiError, self).__init__(**kwargs) + self.details = kwargs.get('details', None) + self.innererror = kwargs.get('innererror', None) + self.code = kwargs.get('code', None) + self.target = kwargs.get('target', None) + self.message = kwargs.get('message', None) + + +class ApiErrorBase(Model): + """Api error base. + + :param code: The error code. + :type code: str + :param target: The target of the particular error. + :type target: str + :param message: The error message. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ApiErrorBase, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.target = kwargs.get('target', None) + self.message = kwargs.get('message', None) + + +class CloudError(Model): + """An error response from the Compute service. + + :param error: + :type error: ~azure.mgmt.compute.v2020_06_30.models.ApiError + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'ApiError'}, + } + + def __init__(self, **kwargs): + super(CloudError, self).__init__(**kwargs) + self.error = kwargs.get('error', None) + + +class CloudErrorException(HttpOperationError): + """Server responded with exception of type: 'CloudError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(CloudErrorException, self).__init__(deserialize, response, 'CloudError', *args) + + +class CreationData(Model): + """Data used when creating a disk. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :param create_option: Required. This enumerates the possible sources of a + disk's creation. Possible values include: 'Empty', 'Attach', 'FromImage', + 'Import', 'Copy', 'Restore', 'Upload' + :type create_option: str or + ~azure.mgmt.compute.v2020_06_30.models.DiskCreateOption + :param storage_account_id: Required if createOption is Import. The Azure + Resource Manager identifier of the storage account containing the blob to + import as a disk. + :type storage_account_id: str + :param image_reference: Disk source information. + :type image_reference: + ~azure.mgmt.compute.v2020_06_30.models.ImageDiskReference + :param gallery_image_reference: Required if creating from a Gallery Image. + The id of the ImageDiskReference will be the ARM id of the shared gallery + image version from which to create a disk. + :type gallery_image_reference: + ~azure.mgmt.compute.v2020_06_30.models.ImageDiskReference + :param source_uri: If createOption is Import, this is the URI of a blob to + be imported into a managed disk. + :type source_uri: str + :param source_resource_id: If createOption is Copy, this is the ARM id of + the source snapshot or disk. + :type source_resource_id: str + :ivar source_unique_id: If this field is set, this is the unique id + identifying the source of this resource.
+ :vartype source_unique_id: str + :param upload_size_bytes: If createOption is Upload, this is the size of + the contents of the upload including the VHD footer. This value should be + between 20972032 (20 MiB + 512 bytes for the VHD footer) and + 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer). + :type upload_size_bytes: long + :param logical_sector_size: Logical sector size in bytes for Ultra disks. + Supported values are 512 and 4096. 4096 is the default. + :type logical_sector_size: int + """ + + _validation = { + 'create_option': {'required': True}, + 'source_unique_id': {'readonly': True}, + } + + _attribute_map = { + 'create_option': {'key': 'createOption', 'type': 'str'}, + 'storage_account_id': {'key': 'storageAccountId', 'type': 'str'}, + 'image_reference': {'key': 'imageReference', 'type': 'ImageDiskReference'}, + 'gallery_image_reference': {'key': 'galleryImageReference', 'type': 'ImageDiskReference'}, + 'source_uri': {'key': 'sourceUri', 'type': 'str'}, + 'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'}, + 'source_unique_id': {'key': 'sourceUniqueId', 'type': 'str'}, + 'upload_size_bytes': {'key': 'uploadSizeBytes', 'type': 'long'}, + 'logical_sector_size': {'key': 'logicalSectorSize', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(CreationData, self).__init__(**kwargs) + self.create_option = kwargs.get('create_option', None) + self.storage_account_id = kwargs.get('storage_account_id', None) + self.image_reference = kwargs.get('image_reference', None) + self.gallery_image_reference = kwargs.get('gallery_image_reference', None) + self.source_uri = kwargs.get('source_uri', None) + self.source_resource_id = kwargs.get('source_resource_id', None) + self.source_unique_id = None + self.upload_size_bytes = kwargs.get('upload_size_bytes', None) + self.logical_sector_size = kwargs.get('logical_sector_size', None) + + +class Resource(Model): + """The Resource model definition. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id + :vartype id: str + :ivar name: Resource name + :vartype name: str + :ivar type: Resource type + :vartype type: str + :param location: Required. Resource location + :type location: str + :param tags: Resource tags + :type tags: dict[str, str] + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + } + + def __init__(self, **kwargs): + super(Resource, self).__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.location = kwargs.get('location', None) + self.tags = kwargs.get('tags', None) + + +class Disk(Resource): + """Disk resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id + :vartype id: str + :ivar name: Resource name + :vartype name: str + :ivar type: Resource type + :vartype type: str + :param location: Required.
Resource location + :type location: str + :param tags: Resource tags + :type tags: dict[str, str] + :ivar managed_by: A relative URI containing the ID of the VM that has the + disk attached. + :vartype managed_by: str + :ivar managed_by_extended: List of relative URIs containing the IDs of the + VMs that have the disk attached. maxShares should be set to a value + greater than one for disks to allow attaching them to multiple VMs. + :vartype managed_by_extended: list[str] + :param sku: + :type sku: ~azure.mgmt.compute.v2020_06_30.models.DiskSku + :param zones: The Logical zone list for Disk. + :type zones: list[str] + :ivar time_created: The time when the disk was created. + :vartype time_created: datetime + :param os_type: The Operating System type. Possible values include: + 'Windows', 'Linux' + :type os_type: str or + ~azure.mgmt.compute.v2020_06_30.models.OperatingSystemTypes + :param hyper_vgeneration: The hypervisor generation of the Virtual + Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2' + :type hyper_vgeneration: str or + ~azure.mgmt.compute.v2020_06_30.models.HyperVGeneration + :param creation_data: Required. Disk source information. CreationData + information cannot be changed after the disk has been created. + :type creation_data: ~azure.mgmt.compute.v2020_06_30.models.CreationData + :param disk_size_gb: If creationData.createOption is Empty, this field is + mandatory and it indicates the size of the disk to create. If this field + is present for updates or creation with other options, it indicates a + resize. Resizes are only allowed if the disk is not attached to a running + VM, and can only increase the disk's size. + :type disk_size_gb: int + :ivar disk_size_bytes: The size of the disk in bytes. This field is read + only. + :vartype disk_size_bytes: long + :ivar unique_id: Unique Guid identifying the resource. + :vartype unique_id: str + :param encryption_settings_collection: Encryption settings collection used + for Azure Disk Encryption, can contain multiple encryption settings per + disk or snapshot. + :type encryption_settings_collection: + ~azure.mgmt.compute.v2020_06_30.models.EncryptionSettingsCollection + :ivar provisioning_state: The disk provisioning state. + :vartype provisioning_state: str + :param disk_iops_read_write: The number of IOPS allowed for this disk; + only settable for UltraSSD disks. One operation can transfer between 4k + and 256k bytes. + :type disk_iops_read_write: long + :param disk_mbps_read_write: The bandwidth allowed for this disk; only + settable for UltraSSD disks. MBps means millions of bytes per second - MB + here uses the ISO notation, of powers of 10. + :type disk_mbps_read_write: long + :param disk_iops_read_only: The total number of IOPS that will be allowed + across all VMs mounting the shared disk as ReadOnly. One operation can + transfer between 4k and 256k bytes. + :type disk_iops_read_only: long + :param disk_mbps_read_only: The total throughput (MBps) that will be + allowed across all VMs mounting the shared disk as ReadOnly. MBps means + millions of bytes per second - MB here uses the ISO notation, of powers of + 10. + :type disk_mbps_read_only: long + :param disk_state: The state of the disk. 
Possible values include: + 'Unattached', 'Attached', 'Reserved', 'ActiveSAS', 'ReadyToUpload', + 'ActiveUpload' + :type disk_state: str or ~azure.mgmt.compute.v2020_06_30.models.DiskState + :param encryption: Encryption property can be used to encrypt data at rest + with customer managed keys or platform managed keys. + :type encryption: ~azure.mgmt.compute.v2020_06_30.models.Encryption + :param max_shares: The maximum number of VMs that can attach to the disk + at the same time. Value greater than one indicates a disk that can be + mounted on multiple VMs at the same time. + :type max_shares: int + :ivar share_info: Details of the list of all VMs that have the disk + attached. maxShares should be set to a value greater than one for disks to + allow attaching them to multiple VMs. + :vartype share_info: + list[~azure.mgmt.compute.v2020_06_30.models.ShareInfoElement] + :param network_access_policy: Possible values include: 'AllowAll', + 'AllowPrivate', 'DenyAll' + :type network_access_policy: str or + ~azure.mgmt.compute.v2020_06_30.models.NetworkAccessPolicy + :param disk_access_id: ARM id of the DiskAccess resource for using private + endpoints on disks. + :type disk_access_id: str + :param tier: Performance tier of the disk (e.g, P4, S10) as described + here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. + Does not apply to Ultra disks. + :type tier: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'required': True}, + 'managed_by': {'readonly': True}, + 'managed_by_extended': {'readonly': True}, + 'time_created': {'readonly': True}, + 'creation_data': {'required': True}, + 'disk_size_bytes': {'readonly': True}, + 'unique_id': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'share_info': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'managed_by': {'key': 'managedBy', 'type': 'str'}, + 'managed_by_extended': {'key': 'managedByExtended', 'type': '[str]'}, + 'sku': {'key': 'sku', 'type': 'DiskSku'}, + 'zones': {'key': 'zones', 'type': '[str]'}, + 'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'}, + 'os_type': {'key': 'properties.osType', 'type': 'OperatingSystemTypes'}, + 'hyper_vgeneration': {'key': 'properties.hyperVGeneration', 'type': 'str'}, + 'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'}, + 'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'}, + 'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'}, + 'unique_id': {'key': 'properties.uniqueId', 'type': 'str'}, + 'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'}, + 'disk_mbps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'long'}, + 'disk_iops_read_only': {'key': 'properties.diskIOPSReadOnly', 'type': 'long'}, + 'disk_mbps_read_only': {'key': 'properties.diskMBpsReadOnly', 'type': 'long'}, + 'disk_state': {'key': 'properties.diskState', 'type': 'str'}, + 'encryption': {'key': 'properties.encryption', 'type': 'Encryption'}, + 'max_shares': {'key': 'properties.maxShares', 'type': 
'int'}, + 'share_info': {'key': 'properties.shareInfo', 'type': '[ShareInfoElement]'}, + 'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'}, + 'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'}, + 'tier': {'key': 'properties.tier', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(Disk, self).__init__(**kwargs) + self.managed_by = None + self.managed_by_extended = None + self.sku = kwargs.get('sku', None) + self.zones = kwargs.get('zones', None) + self.time_created = None + self.os_type = kwargs.get('os_type', None) + self.hyper_vgeneration = kwargs.get('hyper_vgeneration', None) + self.creation_data = kwargs.get('creation_data', None) + self.disk_size_gb = kwargs.get('disk_size_gb', None) + self.disk_size_bytes = None + self.unique_id = None + self.encryption_settings_collection = kwargs.get('encryption_settings_collection', None) + self.provisioning_state = None + self.disk_iops_read_write = kwargs.get('disk_iops_read_write', None) + self.disk_mbps_read_write = kwargs.get('disk_mbps_read_write', None) + self.disk_iops_read_only = kwargs.get('disk_iops_read_only', None) + self.disk_mbps_read_only = kwargs.get('disk_mbps_read_only', None) + self.disk_state = kwargs.get('disk_state', None) + self.encryption = kwargs.get('encryption', None) + self.max_shares = kwargs.get('max_shares', None) + self.share_info = None + self.network_access_policy = kwargs.get('network_access_policy', None) + self.disk_access_id = kwargs.get('disk_access_id', None) + self.tier = kwargs.get('tier', None) + + +class DiskAccess(Resource): + """disk access resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id + :vartype id: str + :ivar name: Resource name + :vartype name: str + :ivar type: Resource type + :vartype type: str + :param location: Required. Resource location + :type location: str + :param tags: Resource tags + :type tags: dict[str, str] + :ivar private_endpoint_connections: A readonly collection of private + endpoint connections created on the disk. Currently only one endpoint + connection is supported. + :vartype private_endpoint_connections: + list[~azure.mgmt.compute.v2020_06_30.models.PrivateEndpointConnection] + :ivar provisioning_state: The disk access resource provisioning state. + :vartype provisioning_state: str + :ivar time_created: The time when the disk access was created. 
+ :vartype time_created: datetime + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'required': True}, + 'private_endpoint_connections': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'time_created': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(DiskAccess, self).__init__(**kwargs) + self.private_endpoint_connections = None + self.provisioning_state = None + self.time_created = None + + +class DiskAccessUpdate(Model): + """Used for updating a disk access resource. + + :param tags: Resource tags + :type tags: dict[str, str] + """ + + _attribute_map = { + 'tags': {'key': 'tags', 'type': '{str}'}, + } + + def __init__(self, **kwargs): + super(DiskAccessUpdate, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + + +class DiskEncryptionSet(Resource): + """disk encryption set resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id + :vartype id: str + :ivar name: Resource name + :vartype name: str + :ivar type: Resource type + :vartype type: str + :param location: Required. Resource location + :type location: str + :param tags: Resource tags + :type tags: dict[str, str] + :param identity: + :type identity: + ~azure.mgmt.compute.v2020_06_30.models.EncryptionSetIdentity + :param encryption_type: Possible values include: + 'EncryptionAtRestWithCustomerKey', + 'EncryptionAtRestWithPlatformAndCustomerKeys' + :type encryption_type: str or + ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetType + :param active_key: The key vault key which is currently used by this disk + encryption set. + :type active_key: + ~azure.mgmt.compute.v2020_06_30.models.KeyVaultAndKeyReference + :ivar previous_keys: A readonly collection of key vault keys previously + used by this disk encryption set while a key rotation is in progress. It + will be empty if there is no ongoing key rotation. + :vartype previous_keys: + list[~azure.mgmt.compute.v2020_06_30.models.KeyVaultAndKeyReference] + :ivar provisioning_state: The disk encryption set provisioning state. 
+ :vartype provisioning_state: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'required': True}, + 'previous_keys': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'identity': {'key': 'identity', 'type': 'EncryptionSetIdentity'}, + 'encryption_type': {'key': 'properties.encryptionType', 'type': 'str'}, + 'active_key': {'key': 'properties.activeKey', 'type': 'KeyVaultAndKeyReference'}, + 'previous_keys': {'key': 'properties.previousKeys', 'type': '[KeyVaultAndKeyReference]'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(DiskEncryptionSet, self).__init__(**kwargs) + self.identity = kwargs.get('identity', None) + self.encryption_type = kwargs.get('encryption_type', None) + self.active_key = kwargs.get('active_key', None) + self.previous_keys = None + self.provisioning_state = None + + +class DiskEncryptionSetUpdate(Model): + """disk encryption set update resource. + + :param encryption_type: Possible values include: + 'EncryptionAtRestWithCustomerKey', + 'EncryptionAtRestWithPlatformAndCustomerKeys' + :type encryption_type: str or + ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetType + :param active_key: + :type active_key: + ~azure.mgmt.compute.v2020_06_30.models.KeyVaultAndKeyReference + :param tags: Resource tags + :type tags: dict[str, str] + """ + + _attribute_map = { + 'encryption_type': {'key': 'properties.encryptionType', 'type': 'str'}, + 'active_key': {'key': 'properties.activeKey', 'type': 'KeyVaultAndKeyReference'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + } + + def __init__(self, **kwargs): + super(DiskEncryptionSetUpdate, self).__init__(**kwargs) + self.encryption_type = kwargs.get('encryption_type', None) + self.active_key = kwargs.get('active_key', None) + self.tags = kwargs.get('tags', None) + + +class DiskSku(Model): + """The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or + UltraSSD_LRS. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :param name: The sku name. Possible values include: 'Standard_LRS', + 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS' + :type name: str or + ~azure.mgmt.compute.v2020_06_30.models.DiskStorageAccountTypes + :ivar tier: The sku tier. + :vartype tier: str + """ + + _validation = { + 'tier': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'tier': {'key': 'tier', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(DiskSku, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.tier = None + + +class DiskUpdate(Model): + """Disk update resource. + + :param os_type: the Operating System type. Possible values include: + 'Windows', 'Linux' + :type os_type: str or + ~azure.mgmt.compute.v2020_06_30.models.OperatingSystemTypes + :param disk_size_gb: If creationData.createOption is Empty, this field is + mandatory and it indicates the size of the disk to create. If this field + is present for updates or creation with other options, it indicates a + resize. Resizes are only allowed if the disk is not attached to a running + VM, and can only increase the disk's size. 
+ :type disk_size_gb: int + :param encryption_settings_collection: Encryption settings collection used + by Azure Disk Encryption, can contain multiple encryption settings per + disk or snapshot. + :type encryption_settings_collection: + ~azure.mgmt.compute.v2020_06_30.models.EncryptionSettingsCollection + :param disk_iops_read_write: The number of IOPS allowed for this disk; + only settable for UltraSSD disks. One operation can transfer between 4k + and 256k bytes. + :type disk_iops_read_write: long + :param disk_mbps_read_write: The bandwidth allowed for this disk; only + settable for UltraSSD disks. MBps means millions of bytes per second - MB + here uses the ISO notation, of powers of 10. + :type disk_mbps_read_write: long + :param disk_iops_read_only: The total number of IOPS that will be allowed + across all VMs mounting the shared disk as ReadOnly. One operation can + transfer between 4k and 256k bytes. + :type disk_iops_read_only: long + :param disk_mbps_read_only: The total throughput (MBps) that will be + allowed across all VMs mounting the shared disk as ReadOnly. MBps means + millions of bytes per second - MB here uses the ISO notation, of powers of + 10. + :type disk_mbps_read_only: long + :param max_shares: The maximum number of VMs that can attach to the disk + at the same time. Value greater than one indicates a disk that can be + mounted on multiple VMs at the same time. + :type max_shares: int + :param encryption: Encryption property can be used to encrypt data at rest + with customer managed keys or platform managed keys. + :type encryption: ~azure.mgmt.compute.v2020_06_30.models.Encryption + :param network_access_policy: Possible values include: 'AllowAll', + 'AllowPrivate', 'DenyAll' + :type network_access_policy: str or + ~azure.mgmt.compute.v2020_06_30.models.NetworkAccessPolicy + :param disk_access_id: ARM id of the DiskAccess resource for using private + endpoints on disks. + :type disk_access_id: str + :param tier: Performance tier of the disk (e.g., P4, S10) as described + here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. + Does not apply to Ultra disks.
+ :type tier: str + :param tags: Resource tags + :type tags: dict[str, str] + :param sku: + :type sku: ~azure.mgmt.compute.v2020_06_30.models.DiskSku + """ + + _attribute_map = { + 'os_type': {'key': 'properties.osType', 'type': 'OperatingSystemTypes'}, + 'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'}, + 'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'}, + 'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'}, + 'disk_mbps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'long'}, + 'disk_iops_read_only': {'key': 'properties.diskIOPSReadOnly', 'type': 'long'}, + 'disk_mbps_read_only': {'key': 'properties.diskMBpsReadOnly', 'type': 'long'}, + 'max_shares': {'key': 'properties.maxShares', 'type': 'int'}, + 'encryption': {'key': 'properties.encryption', 'type': 'Encryption'}, + 'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'}, + 'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'}, + 'tier': {'key': 'properties.tier', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'sku': {'key': 'sku', 'type': 'DiskSku'}, + } + + def __init__(self, **kwargs): + super(DiskUpdate, self).__init__(**kwargs) + self.os_type = kwargs.get('os_type', None) + self.disk_size_gb = kwargs.get('disk_size_gb', None) + self.encryption_settings_collection = kwargs.get('encryption_settings_collection', None) + self.disk_iops_read_write = kwargs.get('disk_iops_read_write', None) + self.disk_mbps_read_write = kwargs.get('disk_mbps_read_write', None) + self.disk_iops_read_only = kwargs.get('disk_iops_read_only', None) + self.disk_mbps_read_only = kwargs.get('disk_mbps_read_only', None) + self.max_shares = kwargs.get('max_shares', None) + self.encryption = kwargs.get('encryption', None) + self.network_access_policy = kwargs.get('network_access_policy', None) + self.disk_access_id = kwargs.get('disk_access_id', None) + self.tier = kwargs.get('tier', None) + self.tags = kwargs.get('tags', None) + self.sku = kwargs.get('sku', None) + + +class Encryption(Model): + """Encryption at rest settings for disk or snapshot. + + :param disk_encryption_set_id: ResourceId of the disk encryption set to + use for enabling encryption at rest. + :type disk_encryption_set_id: str + :param type: Possible values include: 'EncryptionAtRestWithPlatformKey', + 'EncryptionAtRestWithCustomerKey', + 'EncryptionAtRestWithPlatformAndCustomerKeys' + :type type: str or ~azure.mgmt.compute.v2020_06_30.models.EncryptionType + """ + + _attribute_map = { + 'disk_encryption_set_id': {'key': 'diskEncryptionSetId', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(Encryption, self).__init__(**kwargs) + self.disk_encryption_set_id = kwargs.get('disk_encryption_set_id', None) + self.type = kwargs.get('type', None) + + +class EncryptionSetIdentity(Model): + """The managed identity for the disk encryption set. It should be given + permission on the key vault before it can be used to encrypt disks. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :param type: The type of Managed Identity used by the DiskEncryptionSet. + Only SystemAssigned is supported. Possible values include: + 'SystemAssigned' + :type type: str or + ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetIdentityType + :ivar principal_id: The object id of the Managed Identity Resource. 
This + will be sent to the RP from ARM via the x-ms-identity-principal-id header + in the PUT request if the resource has a systemAssigned(implicit) identity + :vartype principal_id: str + :ivar tenant_id: The tenant id of the Managed Identity Resource. This will + be sent to the RP from ARM via the x-ms-client-tenant-id header in the PUT + request if the resource has a systemAssigned(implicit) identity + :vartype tenant_id: str + """ + + _validation = { + 'principal_id': {'readonly': True}, + 'tenant_id': {'readonly': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'principal_id': {'key': 'principalId', 'type': 'str'}, + 'tenant_id': {'key': 'tenantId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(EncryptionSetIdentity, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.principal_id = None + self.tenant_id = None + + +class EncryptionSettingsCollection(Model): + """Encryption settings for disk or snapshot. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Set this flag to true and provide + DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set + this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to + disable encryption. If EncryptionSettings is null in the request object, + the existing settings remain unchanged. + :type enabled: bool + :param encryption_settings: A collection of encryption settings, one for + each disk volume. + :type encryption_settings: + list[~azure.mgmt.compute.v2020_06_30.models.EncryptionSettingsElement] + :param encryption_settings_version: Describes what type of encryption is + used for the disks. Once this field is set, it cannot be overwritten. + '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds + to Azure Disk Encryption. + :type encryption_settings_version: str + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'enabled': {'key': 'enabled', 'type': 'bool'}, + 'encryption_settings': {'key': 'encryptionSettings', 'type': '[EncryptionSettingsElement]'}, + 'encryption_settings_version': {'key': 'encryptionSettingsVersion', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(EncryptionSettingsCollection, self).__init__(**kwargs) + self.enabled = kwargs.get('enabled', None) + self.encryption_settings = kwargs.get('encryption_settings', None) + self.encryption_settings_version = kwargs.get('encryption_settings_version', None) + + +class EncryptionSettingsElement(Model): + """Encryption settings for one disk volume. + + :param disk_encryption_key: Key Vault Secret Url and vault id of the disk + encryption key + :type disk_encryption_key: + ~azure.mgmt.compute.v2020_06_30.models.KeyVaultAndSecretReference + :param key_encryption_key: Key Vault Key Url and vault id of the key + encryption key. KeyEncryptionKey is optional and when provided is used to + unwrap the disk encryption key. 
+ :type key_encryption_key: + ~azure.mgmt.compute.v2020_06_30.models.KeyVaultAndKeyReference + """ + + _attribute_map = { + 'disk_encryption_key': {'key': 'diskEncryptionKey', 'type': 'KeyVaultAndSecretReference'}, + 'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyVaultAndKeyReference'}, + } + + def __init__(self, **kwargs): + super(EncryptionSettingsElement, self).__init__(**kwargs) + self.disk_encryption_key = kwargs.get('disk_encryption_key', None) + self.key_encryption_key = kwargs.get('key_encryption_key', None) + + +class GrantAccessData(Model): + """Data used for requesting a SAS. + + All required parameters must be populated in order to send to Azure. + + :param access: Required. Possible values include: 'None', 'Read', 'Write' + :type access: str or ~azure.mgmt.compute.v2020_06_30.models.AccessLevel + :param duration_in_seconds: Required. Time duration in seconds until the + SAS access expires. + :type duration_in_seconds: int + """ + + _validation = { + 'access': {'required': True}, + 'duration_in_seconds': {'required': True}, + } + + _attribute_map = { + 'access': {'key': 'access', 'type': 'str'}, + 'duration_in_seconds': {'key': 'durationInSeconds', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(GrantAccessData, self).__init__(**kwargs) + self.access = kwargs.get('access', None) + self.duration_in_seconds = kwargs.get('duration_in_seconds', None) + + +class ImageDiskReference(Model): + """The source image used for creating the disk. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A relative uri containing either a Platform Image + Repository or user image reference. + :type id: str + :param lun: If the disk is created from an image's data disk, this is an + index that indicates which of the data disks in the image to use. For OS + disks, this field is null. + :type lun: int + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'lun': {'key': 'lun', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(ImageDiskReference, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.lun = kwargs.get('lun', None) + + +class InnerError(Model): + """Inner error details. + + :param exceptiontype: The exception type. + :type exceptiontype: str + :param errordetail: The internal error message or exception dump. + :type errordetail: str + """ + + _attribute_map = { + 'exceptiontype': {'key': 'exceptiontype', 'type': 'str'}, + 'errordetail': {'key': 'errordetail', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(InnerError, self).__init__(**kwargs) + self.exceptiontype = kwargs.get('exceptiontype', None) + self.errordetail = kwargs.get('errordetail', None) + + +class KeyVaultAndKeyReference(Model): + """Key Vault Key Url and vault id of KeK, KeK is optional and when provided is + used to unwrap the encryptionKey. + + All required parameters must be populated in order to send to Azure. + + :param source_vault: Required. Resource id of the KeyVault containing the + key or secret + :type source_vault: ~azure.mgmt.compute.v2020_06_30.models.SourceVault + :param key_url: Required. 
Url pointing to a key or secret in KeyVault + :type key_url: str + """ + + _validation = { + 'source_vault': {'required': True}, + 'key_url': {'required': True}, + } + + _attribute_map = { + 'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'}, + 'key_url': {'key': 'keyUrl', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(KeyVaultAndKeyReference, self).__init__(**kwargs) + self.source_vault = kwargs.get('source_vault', None) + self.key_url = kwargs.get('key_url', None) + + +class KeyVaultAndSecretReference(Model): + """Key Vault Secret Url and vault id of the encryption key . + + All required parameters must be populated in order to send to Azure. + + :param source_vault: Required. Resource id of the KeyVault containing the + key or secret + :type source_vault: ~azure.mgmt.compute.v2020_06_30.models.SourceVault + :param secret_url: Required. Url pointing to a key or secret in KeyVault + :type secret_url: str + """ + + _validation = { + 'source_vault': {'required': True}, + 'secret_url': {'required': True}, + } + + _attribute_map = { + 'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'}, + 'secret_url': {'key': 'secretUrl', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(KeyVaultAndSecretReference, self).__init__(**kwargs) + self.source_vault = kwargs.get('source_vault', None) + self.secret_url = kwargs.get('secret_url', None) + + +class PrivateEndpoint(Model): + """The Private Endpoint resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ARM identifier for Private Endpoint + :vartype id: str + """ + + _validation = { + 'id': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(PrivateEndpoint, self).__init__(**kwargs) + self.id = None + + +class PrivateEndpointConnection(Model): + """The Private Endpoint Connection resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :param private_endpoint: The resource of private end point. + :type private_endpoint: + ~azure.mgmt.compute.v2020_06_30.models.PrivateEndpoint + :param private_link_service_connection_state: Required. A collection of + information about the state of the connection between DiskAccess and + Virtual Network. + :type private_link_service_connection_state: + ~azure.mgmt.compute.v2020_06_30.models.PrivateLinkServiceConnectionState + :param provisioning_state: The provisioning state of the private endpoint + connection resource. 
Possible values include: 'Succeeded', 'Creating', + 'Deleting', 'Failed' + :type provisioning_state: str or + ~azure.mgmt.compute.v2020_06_30.models.PrivateEndpointConnectionProvisioningState + :ivar id: private endpoint connection Id + :vartype id: str + :ivar name: private endpoint connection name + :vartype name: str + :ivar type: private endpoint connection type + :vartype type: str + """ + + _validation = { + 'private_link_service_connection_state': {'required': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + } + + _attribute_map = { + 'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, + 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(PrivateEndpointConnection, self).__init__(**kwargs) + self.private_endpoint = kwargs.get('private_endpoint', None) + self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None) + self.provisioning_state = kwargs.get('provisioning_state', None) + self.id = None + self.name = None + self.type = None + + +class PrivateLinkResource(Model): + """A private link resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar group_id: The private link resource group id. + :vartype group_id: str + :ivar required_members: The private link resource required member names. + :vartype required_members: list[str] + :param required_zone_names: The private link resource DNS zone name. + :type required_zone_names: list[str] + :ivar id: private link resource Id + :vartype id: str + :ivar name: private link resource name + :vartype name: str + :ivar type: private link resource type + :vartype type: str + """ + + _validation = { + 'group_id': {'readonly': True}, + 'required_members': {'readonly': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + } + + _attribute_map = { + 'group_id': {'key': 'properties.groupId', 'type': 'str'}, + 'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'}, + 'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'}, + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(PrivateLinkResource, self).__init__(**kwargs) + self.group_id = None + self.required_members = None + self.required_zone_names = kwargs.get('required_zone_names', None) + self.id = None + self.name = None + self.type = None + + +class PrivateLinkResourceListResult(Model): + """A list of private link resources. + + :param value: Array of private link resources + :type value: + list[~azure.mgmt.compute.v2020_06_30.models.PrivateLinkResource] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[PrivateLinkResource]'}, + } + + def __init__(self, **kwargs): + super(PrivateLinkResourceListResult, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + + +class PrivateLinkServiceConnectionState(Model): + """A collection of information about the state of the connection between + service consumer and provider. 
+ + :param status: Indicates whether the connection has been + Approved/Rejected/Removed by the owner of the service. Possible values + include: 'Pending', 'Approved', 'Rejected' + :type status: str or + ~azure.mgmt.compute.v2020_06_30.models.PrivateEndpointServiceConnectionStatus + :param description: The reason for approval/rejection of the connection. + :type description: str + :param actions_required: A message indicating if changes on the service + provider require any updates on the consumer. + :type actions_required: str + """ + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'actions_required': {'key': 'actionsRequired', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.description = kwargs.get('description', None) + self.actions_required = kwargs.get('actions_required', None) + + +class ShareInfoElement(Model): + """ShareInfoElement. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar vm_uri: A relative URI containing the ID of the VM that has the disk + attached. + :vartype vm_uri: str + """ + + _validation = { + 'vm_uri': {'readonly': True}, + } + + _attribute_map = { + 'vm_uri': {'key': 'vmUri', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ShareInfoElement, self).__init__(**kwargs) + self.vm_uri = None + + +class Snapshot(Resource): + """Snapshot resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id + :vartype id: str + :ivar name: Resource name + :vartype name: str + :ivar type: Resource type + :vartype type: str + :param location: Required. Resource location + :type location: str + :param tags: Resource tags + :type tags: dict[str, str] + :ivar managed_by: Unused. Always Null. + :vartype managed_by: str + :param sku: + :type sku: ~azure.mgmt.compute.v2020_06_30.models.SnapshotSku + :ivar time_created: The time when the snapshot was created. + :vartype time_created: datetime + :param os_type: The Operating System type. Possible values include: + 'Windows', 'Linux' + :type os_type: str or + ~azure.mgmt.compute.v2020_06_30.models.OperatingSystemTypes + :param hyper_vgeneration: The hypervisor generation of the Virtual + Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2' + :type hyper_vgeneration: str or + ~azure.mgmt.compute.v2020_06_30.models.HyperVGeneration + :param creation_data: Required. Disk source information. CreationData + information cannot be changed after the disk has been created. + :type creation_data: ~azure.mgmt.compute.v2020_06_30.models.CreationData + :param disk_size_gb: If creationData.createOption is Empty, this field is + mandatory and it indicates the size of the disk to create. If this field + is present for updates or creation with other options, it indicates a + resize. Resizes are only allowed if the disk is not attached to a running + VM, and can only increase the disk's size. + :type disk_size_gb: int + :ivar disk_size_bytes: The size of the disk in bytes. This field is read + only. + :vartype disk_size_bytes: long + :param disk_state: The state of the snapshot. 
Possible values include: + 'Unattached', 'Attached', 'Reserved', 'ActiveSAS', 'ReadyToUpload', + 'ActiveUpload' + :type disk_state: str or ~azure.mgmt.compute.v2020_06_30.models.DiskState + :ivar unique_id: Unique Guid identifying the resource. + :vartype unique_id: str + :param encryption_settings_collection: Encryption settings collection used + be Azure Disk Encryption, can contain multiple encryption settings per + disk or snapshot. + :type encryption_settings_collection: + ~azure.mgmt.compute.v2020_06_30.models.EncryptionSettingsCollection + :ivar provisioning_state: The disk provisioning state. + :vartype provisioning_state: str + :param incremental: Whether a snapshot is incremental. Incremental + snapshots on the same disk occupy less space than full snapshots and can + be diffed. + :type incremental: bool + :param encryption: Encryption property can be used to encrypt data at rest + with customer managed keys or platform managed keys. + :type encryption: ~azure.mgmt.compute.v2020_06_30.models.Encryption + :param network_access_policy: Possible values include: 'AllowAll', + 'AllowPrivate', 'DenyAll' + :type network_access_policy: str or + ~azure.mgmt.compute.v2020_06_30.models.NetworkAccessPolicy + :param disk_access_id: ARM id of the DiskAccess resource for using private + endpoints on disks. + :type disk_access_id: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'required': True}, + 'managed_by': {'readonly': True}, + 'time_created': {'readonly': True}, + 'creation_data': {'required': True}, + 'disk_size_bytes': {'readonly': True}, + 'unique_id': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'managed_by': {'key': 'managedBy', 'type': 'str'}, + 'sku': {'key': 'sku', 'type': 'SnapshotSku'}, + 'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'}, + 'os_type': {'key': 'properties.osType', 'type': 'OperatingSystemTypes'}, + 'hyper_vgeneration': {'key': 'properties.hyperVGeneration', 'type': 'str'}, + 'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'}, + 'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'}, + 'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'}, + 'disk_state': {'key': 'properties.diskState', 'type': 'str'}, + 'unique_id': {'key': 'properties.uniqueId', 'type': 'str'}, + 'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'incremental': {'key': 'properties.incremental', 'type': 'bool'}, + 'encryption': {'key': 'properties.encryption', 'type': 'Encryption'}, + 'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'}, + 'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(Snapshot, self).__init__(**kwargs) + self.managed_by = None + self.sku = kwargs.get('sku', None) + self.time_created = None + self.os_type = kwargs.get('os_type', None) + self.hyper_vgeneration = kwargs.get('hyper_vgeneration', None) + self.creation_data = kwargs.get('creation_data', None) + self.disk_size_gb = kwargs.get('disk_size_gb', None) + 
self.disk_size_bytes = None + self.disk_state = kwargs.get('disk_state', None) + self.unique_id = None + self.encryption_settings_collection = kwargs.get('encryption_settings_collection', None) + self.provisioning_state = None + self.incremental = kwargs.get('incremental', None) + self.encryption = kwargs.get('encryption', None) + self.network_access_policy = kwargs.get('network_access_policy', None) + self.disk_access_id = kwargs.get('disk_access_id', None) + + +class SnapshotSku(Model): + """The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :param name: The sku name. Possible values include: 'Standard_LRS', + 'Premium_LRS', 'Standard_ZRS' + :type name: str or + ~azure.mgmt.compute.v2020_06_30.models.SnapshotStorageAccountTypes + :ivar tier: The sku tier. + :vartype tier: str + """ + + _validation = { + 'tier': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'tier': {'key': 'tier', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(SnapshotSku, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.tier = None + + +class SnapshotUpdate(Model): + """Snapshot update resource. + + :param os_type: the Operating System type. Possible values include: + 'Windows', 'Linux' + :type os_type: str or + ~azure.mgmt.compute.v2020_06_30.models.OperatingSystemTypes + :param disk_size_gb: If creationData.createOption is Empty, this field is + mandatory and it indicates the size of the disk to create. If this field + is present for updates or creation with other options, it indicates a + resize. Resizes are only allowed if the disk is not attached to a running + VM, and can only increase the disk's size. + :type disk_size_gb: int + :param encryption_settings_collection: Encryption settings collection used + be Azure Disk Encryption, can contain multiple encryption settings per + disk or snapshot. + :type encryption_settings_collection: + ~azure.mgmt.compute.v2020_06_30.models.EncryptionSettingsCollection + :param encryption: Encryption property can be used to encrypt data at rest + with customer managed keys or platform managed keys. + :type encryption: ~azure.mgmt.compute.v2020_06_30.models.Encryption + :param network_access_policy: Possible values include: 'AllowAll', + 'AllowPrivate', 'DenyAll' + :type network_access_policy: str or + ~azure.mgmt.compute.v2020_06_30.models.NetworkAccessPolicy + :param disk_access_id: ARM id of the DiskAccess resource for using private + endpoints on disks. 
+ :type disk_access_id: str + :param tags: Resource tags + :type tags: dict[str, str] + :param sku: + :type sku: ~azure.mgmt.compute.v2020_06_30.models.SnapshotSku + """ + + _attribute_map = { + 'os_type': {'key': 'properties.osType', 'type': 'OperatingSystemTypes'}, + 'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'}, + 'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'}, + 'encryption': {'key': 'properties.encryption', 'type': 'Encryption'}, + 'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'}, + 'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'sku': {'key': 'sku', 'type': 'SnapshotSku'}, + } + + def __init__(self, **kwargs): + super(SnapshotUpdate, self).__init__(**kwargs) + self.os_type = kwargs.get('os_type', None) + self.disk_size_gb = kwargs.get('disk_size_gb', None) + self.encryption_settings_collection = kwargs.get('encryption_settings_collection', None) + self.encryption = kwargs.get('encryption', None) + self.network_access_policy = kwargs.get('network_access_policy', None) + self.disk_access_id = kwargs.get('disk_access_id', None) + self.tags = kwargs.get('tags', None) + self.sku = kwargs.get('sku', None) + + +class SourceVault(Model): + """The vault id is an Azure Resource Manager Resource id in the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}. + + :param id: Resource Id + :type id: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(SourceVault, self).__init__(**kwargs) + self.id = kwargs.get('id', None) diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_models_py3.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_models_py3.py new file mode 100644 index 000000000000..19a451a165e0 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_models_py3.py @@ -0,0 +1,1403 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class AccessUri(Model): + """A disk access SAS uri. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar access_sas: A SAS uri for accessing a disk. + :vartype access_sas: str + """ + + _validation = { + 'access_sas': {'readonly': True}, + } + + _attribute_map = { + 'access_sas': {'key': 'accessSAS', 'type': 'str'}, + } + + def __init__(self, **kwargs) -> None: + super(AccessUri, self).__init__(**kwargs) + self.access_sas = None + + +class ApiError(Model): + """Api error. + + :param details: The Api error details + :type details: list[~azure.mgmt.compute.v2020_06_30.models.ApiErrorBase] + :param innererror: The Api inner error + :type innererror: ~azure.mgmt.compute.v2020_06_30.models.InnerError + :param code: The error code. 
+ :type code: str + :param target: The target of the particular error. + :type target: str + :param message: The error message. + :type message: str + """ + + _attribute_map = { + 'details': {'key': 'details', 'type': '[ApiErrorBase]'}, + 'innererror': {'key': 'innererror', 'type': 'InnerError'}, + 'code': {'key': 'code', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__(self, *, details=None, innererror=None, code: str=None, target: str=None, message: str=None, **kwargs) -> None: + super(ApiError, self).__init__(**kwargs) + self.details = details + self.innererror = innererror + self.code = code + self.target = target + self.message = message + + +class ApiErrorBase(Model): + """Api error base. + + :param code: The error code. + :type code: str + :param target: The target of the particular error. + :type target: str + :param message: The error message. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__(self, *, code: str=None, target: str=None, message: str=None, **kwargs) -> None: + super(ApiErrorBase, self).__init__(**kwargs) + self.code = code + self.target = target + self.message = message + + +class CloudError(Model): + """An error response from the Compute service. + + :param error: + :type error: ~azure.mgmt.compute.v2020_06_30.models.ApiError + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'ApiError'}, + } + + def __init__(self, *, error=None, **kwargs) -> None: + super(CloudError, self).__init__(**kwargs) + self.error = error + + +class CloudErrorException(HttpOperationError): + """Server responsed with exception of type: 'CloudError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(CloudErrorException, self).__init__(deserialize, response, 'CloudError', *args) + + +class CreationData(Model): + """Data used when creating a disk. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :param create_option: Required. This enumerates the possible sources of a + disk's creation. Possible values include: 'Empty', 'Attach', 'FromImage', + 'Import', 'Copy', 'Restore', 'Upload' + :type create_option: str or + ~azure.mgmt.compute.v2020_06_30.models.DiskCreateOption + :param storage_account_id: Required if createOption is Import. The Azure + Resource Manager identifier of the storage account containing the blob to + import as a disk. + :type storage_account_id: str + :param image_reference: Disk source information. + :type image_reference: + ~azure.mgmt.compute.v2020_06_30.models.ImageDiskReference + :param gallery_image_reference: Required if creating from a Gallery Image. + The id of the ImageDiskReference will be the ARM id of the shared galley + image version from which to create a disk. + :type gallery_image_reference: + ~azure.mgmt.compute.v2020_06_30.models.ImageDiskReference + :param source_uri: If createOption is Import, this is the URI of a blob to + be imported into a managed disk. + :type source_uri: str + :param source_resource_id: If createOption is Copy, this is the ARM id of + the source snapshot or disk. 
+ :type source_resource_id: str + :ivar source_unique_id: If this field is set, this is the unique id + identifying the source of this resource. + :vartype source_unique_id: str + :param upload_size_bytes: If createOption is Upload, this is the size of + the contents of the upload including the VHD footer. This value should be + between 20972032 (20 MiB + 512 bytes for the VHD footer) and + 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer). + :type upload_size_bytes: long + :param logical_sector_size: Logical sector size in bytes for Ultra disks. + Supported values are 512 ad 4096. 4096 is the default. + :type logical_sector_size: int + """ + + _validation = { + 'create_option': {'required': True}, + 'source_unique_id': {'readonly': True}, + } + + _attribute_map = { + 'create_option': {'key': 'createOption', 'type': 'str'}, + 'storage_account_id': {'key': 'storageAccountId', 'type': 'str'}, + 'image_reference': {'key': 'imageReference', 'type': 'ImageDiskReference'}, + 'gallery_image_reference': {'key': 'galleryImageReference', 'type': 'ImageDiskReference'}, + 'source_uri': {'key': 'sourceUri', 'type': 'str'}, + 'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'}, + 'source_unique_id': {'key': 'sourceUniqueId', 'type': 'str'}, + 'upload_size_bytes': {'key': 'uploadSizeBytes', 'type': 'long'}, + 'logical_sector_size': {'key': 'logicalSectorSize', 'type': 'int'}, + } + + def __init__(self, *, create_option, storage_account_id: str=None, image_reference=None, gallery_image_reference=None, source_uri: str=None, source_resource_id: str=None, upload_size_bytes: int=None, logical_sector_size: int=None, **kwargs) -> None: + super(CreationData, self).__init__(**kwargs) + self.create_option = create_option + self.storage_account_id = storage_account_id + self.image_reference = image_reference + self.gallery_image_reference = gallery_image_reference + self.source_uri = source_uri + self.source_resource_id = source_resource_id + self.source_unique_id = None + self.upload_size_bytes = upload_size_bytes + self.logical_sector_size = logical_sector_size + + +class Resource(Model): + """The Resource model definition. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id + :vartype id: str + :ivar name: Resource name + :vartype name: str + :ivar type: Resource type + :vartype type: str + :param location: Required. Resource location + :type location: str + :param tags: Resource tags + :type tags: dict[str, str] + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + } + + def __init__(self, *, location: str, tags=None, **kwargs) -> None: + super(Resource, self).__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.location = location + self.tags = tags + + +class Disk(Resource): + """Disk resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. 
+ + :ivar id: Resource Id + :vartype id: str + :ivar name: Resource name + :vartype name: str + :ivar type: Resource type + :vartype type: str + :param location: Required. Resource location + :type location: str + :param tags: Resource tags + :type tags: dict[str, str] + :ivar managed_by: A relative URI containing the ID of the VM that has the + disk attached. + :vartype managed_by: str + :ivar managed_by_extended: List of relative URIs containing the IDs of the + VMs that have the disk attached. maxShares should be set to a value + greater than one for disks to allow attaching them to multiple VMs. + :vartype managed_by_extended: list[str] + :param sku: + :type sku: ~azure.mgmt.compute.v2020_06_30.models.DiskSku + :param zones: The Logical zone list for Disk. + :type zones: list[str] + :ivar time_created: The time when the disk was created. + :vartype time_created: datetime + :param os_type: The Operating System type. Possible values include: + 'Windows', 'Linux' + :type os_type: str or + ~azure.mgmt.compute.v2020_06_30.models.OperatingSystemTypes + :param hyper_vgeneration: The hypervisor generation of the Virtual + Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2' + :type hyper_vgeneration: str or + ~azure.mgmt.compute.v2020_06_30.models.HyperVGeneration + :param creation_data: Required. Disk source information. CreationData + information cannot be changed after the disk has been created. + :type creation_data: ~azure.mgmt.compute.v2020_06_30.models.CreationData + :param disk_size_gb: If creationData.createOption is Empty, this field is + mandatory and it indicates the size of the disk to create. If this field + is present for updates or creation with other options, it indicates a + resize. Resizes are only allowed if the disk is not attached to a running + VM, and can only increase the disk's size. + :type disk_size_gb: int + :ivar disk_size_bytes: The size of the disk in bytes. This field is read + only. + :vartype disk_size_bytes: long + :ivar unique_id: Unique Guid identifying the resource. + :vartype unique_id: str + :param encryption_settings_collection: Encryption settings collection used + for Azure Disk Encryption, can contain multiple encryption settings per + disk or snapshot. + :type encryption_settings_collection: + ~azure.mgmt.compute.v2020_06_30.models.EncryptionSettingsCollection + :ivar provisioning_state: The disk provisioning state. + :vartype provisioning_state: str + :param disk_iops_read_write: The number of IOPS allowed for this disk; + only settable for UltraSSD disks. One operation can transfer between 4k + and 256k bytes. + :type disk_iops_read_write: long + :param disk_mbps_read_write: The bandwidth allowed for this disk; only + settable for UltraSSD disks. MBps means millions of bytes per second - MB + here uses the ISO notation, of powers of 10. + :type disk_mbps_read_write: long + :param disk_iops_read_only: The total number of IOPS that will be allowed + across all VMs mounting the shared disk as ReadOnly. One operation can + transfer between 4k and 256k bytes. + :type disk_iops_read_only: long + :param disk_mbps_read_only: The total throughput (MBps) that will be + allowed across all VMs mounting the shared disk as ReadOnly. MBps means + millions of bytes per second - MB here uses the ISO notation, of powers of + 10. + :type disk_mbps_read_only: long + :param disk_state: The state of the disk. 
Possible values include: + 'Unattached', 'Attached', 'Reserved', 'ActiveSAS', 'ReadyToUpload', + 'ActiveUpload' + :type disk_state: str or ~azure.mgmt.compute.v2020_06_30.models.DiskState + :param encryption: Encryption property can be used to encrypt data at rest + with customer managed keys or platform managed keys. + :type encryption: ~azure.mgmt.compute.v2020_06_30.models.Encryption + :param max_shares: The maximum number of VMs that can attach to the disk + at the same time. Value greater than one indicates a disk that can be + mounted on multiple VMs at the same time. + :type max_shares: int + :ivar share_info: Details of the list of all VMs that have the disk + attached. maxShares should be set to a value greater than one for disks to + allow attaching them to multiple VMs. + :vartype share_info: + list[~azure.mgmt.compute.v2020_06_30.models.ShareInfoElement] + :param network_access_policy: Possible values include: 'AllowAll', + 'AllowPrivate', 'DenyAll' + :type network_access_policy: str or + ~azure.mgmt.compute.v2020_06_30.models.NetworkAccessPolicy + :param disk_access_id: ARM id of the DiskAccess resource for using private + endpoints on disks. + :type disk_access_id: str + :param tier: Performance tier of the disk (e.g, P4, S10) as described + here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. + Does not apply to Ultra disks. + :type tier: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'required': True}, + 'managed_by': {'readonly': True}, + 'managed_by_extended': {'readonly': True}, + 'time_created': {'readonly': True}, + 'creation_data': {'required': True}, + 'disk_size_bytes': {'readonly': True}, + 'unique_id': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'share_info': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'managed_by': {'key': 'managedBy', 'type': 'str'}, + 'managed_by_extended': {'key': 'managedByExtended', 'type': '[str]'}, + 'sku': {'key': 'sku', 'type': 'DiskSku'}, + 'zones': {'key': 'zones', 'type': '[str]'}, + 'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'}, + 'os_type': {'key': 'properties.osType', 'type': 'OperatingSystemTypes'}, + 'hyper_vgeneration': {'key': 'properties.hyperVGeneration', 'type': 'str'}, + 'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'}, + 'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'}, + 'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'}, + 'unique_id': {'key': 'properties.uniqueId', 'type': 'str'}, + 'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'}, + 'disk_mbps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'long'}, + 'disk_iops_read_only': {'key': 'properties.diskIOPSReadOnly', 'type': 'long'}, + 'disk_mbps_read_only': {'key': 'properties.diskMBpsReadOnly', 'type': 'long'}, + 'disk_state': {'key': 'properties.diskState', 'type': 'str'}, + 'encryption': {'key': 'properties.encryption', 'type': 'Encryption'}, + 'max_shares': {'key': 'properties.maxShares', 'type': 
'int'}, + 'share_info': {'key': 'properties.shareInfo', 'type': '[ShareInfoElement]'}, + 'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'}, + 'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'}, + 'tier': {'key': 'properties.tier', 'type': 'str'}, + } + + def __init__(self, *, location: str, creation_data, tags=None, sku=None, zones=None, os_type=None, hyper_vgeneration=None, disk_size_gb: int=None, encryption_settings_collection=None, disk_iops_read_write: int=None, disk_mbps_read_write: int=None, disk_iops_read_only: int=None, disk_mbps_read_only: int=None, disk_state=None, encryption=None, max_shares: int=None, network_access_policy=None, disk_access_id: str=None, tier: str=None, **kwargs) -> None: + super(Disk, self).__init__(location=location, tags=tags, **kwargs) + self.managed_by = None + self.managed_by_extended = None + self.sku = sku + self.zones = zones + self.time_created = None + self.os_type = os_type + self.hyper_vgeneration = hyper_vgeneration + self.creation_data = creation_data + self.disk_size_gb = disk_size_gb + self.disk_size_bytes = None + self.unique_id = None + self.encryption_settings_collection = encryption_settings_collection + self.provisioning_state = None + self.disk_iops_read_write = disk_iops_read_write + self.disk_mbps_read_write = disk_mbps_read_write + self.disk_iops_read_only = disk_iops_read_only + self.disk_mbps_read_only = disk_mbps_read_only + self.disk_state = disk_state + self.encryption = encryption + self.max_shares = max_shares + self.share_info = None + self.network_access_policy = network_access_policy + self.disk_access_id = disk_access_id + self.tier = tier + + +class DiskAccess(Resource): + """disk access resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id + :vartype id: str + :ivar name: Resource name + :vartype name: str + :ivar type: Resource type + :vartype type: str + :param location: Required. Resource location + :type location: str + :param tags: Resource tags + :type tags: dict[str, str] + :ivar private_endpoint_connections: A readonly collection of private + endpoint connections created on the disk. Currently only one endpoint + connection is supported. + :vartype private_endpoint_connections: + list[~azure.mgmt.compute.v2020_06_30.models.PrivateEndpointConnection] + :ivar provisioning_state: The disk access resource provisioning state. + :vartype provisioning_state: str + :ivar time_created: The time when the disk access was created. 
+ :vartype time_created: datetime + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'required': True}, + 'private_endpoint_connections': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'time_created': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'}, + } + + def __init__(self, *, location: str, tags=None, **kwargs) -> None: + super(DiskAccess, self).__init__(location=location, tags=tags, **kwargs) + self.private_endpoint_connections = None + self.provisioning_state = None + self.time_created = None + + +class DiskAccessUpdate(Model): + """Used for updating a disk access resource. + + :param tags: Resource tags + :type tags: dict[str, str] + """ + + _attribute_map = { + 'tags': {'key': 'tags', 'type': '{str}'}, + } + + def __init__(self, *, tags=None, **kwargs) -> None: + super(DiskAccessUpdate, self).__init__(**kwargs) + self.tags = tags + + +class DiskEncryptionSet(Resource): + """disk encryption set resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id + :vartype id: str + :ivar name: Resource name + :vartype name: str + :ivar type: Resource type + :vartype type: str + :param location: Required. Resource location + :type location: str + :param tags: Resource tags + :type tags: dict[str, str] + :param identity: + :type identity: + ~azure.mgmt.compute.v2020_06_30.models.EncryptionSetIdentity + :param encryption_type: Possible values include: + 'EncryptionAtRestWithCustomerKey', + 'EncryptionAtRestWithPlatformAndCustomerKeys' + :type encryption_type: str or + ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetType + :param active_key: The key vault key which is currently used by this disk + encryption set. + :type active_key: + ~azure.mgmt.compute.v2020_06_30.models.KeyVaultAndKeyReference + :ivar previous_keys: A readonly collection of key vault keys previously + used by this disk encryption set while a key rotation is in progress. It + will be empty if there is no ongoing key rotation. + :vartype previous_keys: + list[~azure.mgmt.compute.v2020_06_30.models.KeyVaultAndKeyReference] + :ivar provisioning_state: The disk encryption set provisioning state. 
+ :vartype provisioning_state: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'required': True}, + 'previous_keys': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'identity': {'key': 'identity', 'type': 'EncryptionSetIdentity'}, + 'encryption_type': {'key': 'properties.encryptionType', 'type': 'str'}, + 'active_key': {'key': 'properties.activeKey', 'type': 'KeyVaultAndKeyReference'}, + 'previous_keys': {'key': 'properties.previousKeys', 'type': '[KeyVaultAndKeyReference]'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + } + + def __init__(self, *, location: str, tags=None, identity=None, encryption_type=None, active_key=None, **kwargs) -> None: + super(DiskEncryptionSet, self).__init__(location=location, tags=tags, **kwargs) + self.identity = identity + self.encryption_type = encryption_type + self.active_key = active_key + self.previous_keys = None + self.provisioning_state = None + + +class DiskEncryptionSetUpdate(Model): + """disk encryption set update resource. + + :param encryption_type: Possible values include: + 'EncryptionAtRestWithCustomerKey', + 'EncryptionAtRestWithPlatformAndCustomerKeys' + :type encryption_type: str or + ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetType + :param active_key: + :type active_key: + ~azure.mgmt.compute.v2020_06_30.models.KeyVaultAndKeyReference + :param tags: Resource tags + :type tags: dict[str, str] + """ + + _attribute_map = { + 'encryption_type': {'key': 'properties.encryptionType', 'type': 'str'}, + 'active_key': {'key': 'properties.activeKey', 'type': 'KeyVaultAndKeyReference'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + } + + def __init__(self, *, encryption_type=None, active_key=None, tags=None, **kwargs) -> None: + super(DiskEncryptionSetUpdate, self).__init__(**kwargs) + self.encryption_type = encryption_type + self.active_key = active_key + self.tags = tags + + +class DiskSku(Model): + """The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or + UltraSSD_LRS. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :param name: The sku name. Possible values include: 'Standard_LRS', + 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS' + :type name: str or + ~azure.mgmt.compute.v2020_06_30.models.DiskStorageAccountTypes + :ivar tier: The sku tier. + :vartype tier: str + """ + + _validation = { + 'tier': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'tier': {'key': 'tier', 'type': 'str'}, + } + + def __init__(self, *, name=None, **kwargs) -> None: + super(DiskSku, self).__init__(**kwargs) + self.name = name + self.tier = None + + +class DiskUpdate(Model): + """Disk update resource. + + :param os_type: the Operating System type. Possible values include: + 'Windows', 'Linux' + :type os_type: str or + ~azure.mgmt.compute.v2020_06_30.models.OperatingSystemTypes + :param disk_size_gb: If creationData.createOption is Empty, this field is + mandatory and it indicates the size of the disk to create. If this field + is present for updates or creation with other options, it indicates a + resize. 
Resizes are only allowed if the disk is not attached to a running + VM, and can only increase the disk's size. + :type disk_size_gb: int + :param encryption_settings_collection: Encryption settings collection used + be Azure Disk Encryption, can contain multiple encryption settings per + disk or snapshot. + :type encryption_settings_collection: + ~azure.mgmt.compute.v2020_06_30.models.EncryptionSettingsCollection + :param disk_iops_read_write: The number of IOPS allowed for this disk; + only settable for UltraSSD disks. One operation can transfer between 4k + and 256k bytes. + :type disk_iops_read_write: long + :param disk_mbps_read_write: The bandwidth allowed for this disk; only + settable for UltraSSD disks. MBps means millions of bytes per second - MB + here uses the ISO notation, of powers of 10. + :type disk_mbps_read_write: long + :param disk_iops_read_only: The total number of IOPS that will be allowed + across all VMs mounting the shared disk as ReadOnly. One operation can + transfer between 4k and 256k bytes. + :type disk_iops_read_only: long + :param disk_mbps_read_only: The total throughput (MBps) that will be + allowed across all VMs mounting the shared disk as ReadOnly. MBps means + millions of bytes per second - MB here uses the ISO notation, of powers of + 10. + :type disk_mbps_read_only: long + :param max_shares: The maximum number of VMs that can attach to the disk + at the same time. Value greater than one indicates a disk that can be + mounted on multiple VMs at the same time. + :type max_shares: int + :param encryption: Encryption property can be used to encrypt data at rest + with customer managed keys or platform managed keys. + :type encryption: ~azure.mgmt.compute.v2020_06_30.models.Encryption + :param network_access_policy: Possible values include: 'AllowAll', + 'AllowPrivate', 'DenyAll' + :type network_access_policy: str or + ~azure.mgmt.compute.v2020_06_30.models.NetworkAccessPolicy + :param disk_access_id: ARM id of the DiskAccess resource for using private + endpoints on disks. + :type disk_access_id: str + :param tier: Performance tier of the disk (e.g, P4, S10) as described + here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/. + Does not apply to Ultra disks. 
+ :type tier: str + :param tags: Resource tags + :type tags: dict[str, str] + :param sku: + :type sku: ~azure.mgmt.compute.v2020_06_30.models.DiskSku + """ + + _attribute_map = { + 'os_type': {'key': 'properties.osType', 'type': 'OperatingSystemTypes'}, + 'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'}, + 'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'}, + 'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'}, + 'disk_mbps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'long'}, + 'disk_iops_read_only': {'key': 'properties.diskIOPSReadOnly', 'type': 'long'}, + 'disk_mbps_read_only': {'key': 'properties.diskMBpsReadOnly', 'type': 'long'}, + 'max_shares': {'key': 'properties.maxShares', 'type': 'int'}, + 'encryption': {'key': 'properties.encryption', 'type': 'Encryption'}, + 'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'}, + 'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'}, + 'tier': {'key': 'properties.tier', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'sku': {'key': 'sku', 'type': 'DiskSku'}, + } + + def __init__(self, *, os_type=None, disk_size_gb: int=None, encryption_settings_collection=None, disk_iops_read_write: int=None, disk_mbps_read_write: int=None, disk_iops_read_only: int=None, disk_mbps_read_only: int=None, max_shares: int=None, encryption=None, network_access_policy=None, disk_access_id: str=None, tier: str=None, tags=None, sku=None, **kwargs) -> None: + super(DiskUpdate, self).__init__(**kwargs) + self.os_type = os_type + self.disk_size_gb = disk_size_gb + self.encryption_settings_collection = encryption_settings_collection + self.disk_iops_read_write = disk_iops_read_write + self.disk_mbps_read_write = disk_mbps_read_write + self.disk_iops_read_only = disk_iops_read_only + self.disk_mbps_read_only = disk_mbps_read_only + self.max_shares = max_shares + self.encryption = encryption + self.network_access_policy = network_access_policy + self.disk_access_id = disk_access_id + self.tier = tier + self.tags = tags + self.sku = sku + + +class Encryption(Model): + """Encryption at rest settings for disk or snapshot. + + :param disk_encryption_set_id: ResourceId of the disk encryption set to + use for enabling encryption at rest. + :type disk_encryption_set_id: str + :param type: Possible values include: 'EncryptionAtRestWithPlatformKey', + 'EncryptionAtRestWithCustomerKey', + 'EncryptionAtRestWithPlatformAndCustomerKeys' + :type type: str or ~azure.mgmt.compute.v2020_06_30.models.EncryptionType + """ + + _attribute_map = { + 'disk_encryption_set_id': {'key': 'diskEncryptionSetId', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__(self, *, disk_encryption_set_id: str=None, type=None, **kwargs) -> None: + super(Encryption, self).__init__(**kwargs) + self.disk_encryption_set_id = disk_encryption_set_id + self.type = type + + +class EncryptionSetIdentity(Model): + """The managed identity for the disk encryption set. It should be given + permission on the key vault before it can be used to encrypt disks. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :param type: The type of Managed Identity used by the DiskEncryptionSet. + Only SystemAssigned is supported. 
Possible values include: + 'SystemAssigned' + :type type: str or + ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetIdentityType + :ivar principal_id: The object id of the Managed Identity Resource. This + will be sent to the RP from ARM via the x-ms-identity-principal-id header + in the PUT request if the resource has a systemAssigned(implicit) identity + :vartype principal_id: str + :ivar tenant_id: The tenant id of the Managed Identity Resource. This will + be sent to the RP from ARM via the x-ms-client-tenant-id header in the PUT + request if the resource has a systemAssigned(implicit) identity + :vartype tenant_id: str + """ + + _validation = { + 'principal_id': {'readonly': True}, + 'tenant_id': {'readonly': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'principal_id': {'key': 'principalId', 'type': 'str'}, + 'tenant_id': {'key': 'tenantId', 'type': 'str'}, + } + + def __init__(self, *, type=None, **kwargs) -> None: + super(EncryptionSetIdentity, self).__init__(**kwargs) + self.type = type + self.principal_id = None + self.tenant_id = None + + +class EncryptionSettingsCollection(Model): + """Encryption settings for disk or snapshot. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Set this flag to true and provide + DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set + this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to + disable encryption. If EncryptionSettings is null in the request object, + the existing settings remain unchanged. + :type enabled: bool + :param encryption_settings: A collection of encryption settings, one for + each disk volume. + :type encryption_settings: + list[~azure.mgmt.compute.v2020_06_30.models.EncryptionSettingsElement] + :param encryption_settings_version: Describes what type of encryption is + used for the disks. Once this field is set, it cannot be overwritten. + '1.0' corresponds to Azure Disk Encryption with AAD app.'1.1' corresponds + to Azure Disk Encryption. + :type encryption_settings_version: str + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'enabled': {'key': 'enabled', 'type': 'bool'}, + 'encryption_settings': {'key': 'encryptionSettings', 'type': '[EncryptionSettingsElement]'}, + 'encryption_settings_version': {'key': 'encryptionSettingsVersion', 'type': 'str'}, + } + + def __init__(self, *, enabled: bool, encryption_settings=None, encryption_settings_version: str=None, **kwargs) -> None: + super(EncryptionSettingsCollection, self).__init__(**kwargs) + self.enabled = enabled + self.encryption_settings = encryption_settings + self.encryption_settings_version = encryption_settings_version + + +class EncryptionSettingsElement(Model): + """Encryption settings for one disk volume. + + :param disk_encryption_key: Key Vault Secret Url and vault id of the disk + encryption key + :type disk_encryption_key: + ~azure.mgmt.compute.v2020_06_30.models.KeyVaultAndSecretReference + :param key_encryption_key: Key Vault Key Url and vault id of the key + encryption key. KeyEncryptionKey is optional and when provided is used to + unwrap the disk encryption key. 
+ :type key_encryption_key: + ~azure.mgmt.compute.v2020_06_30.models.KeyVaultAndKeyReference + """ + + _attribute_map = { + 'disk_encryption_key': {'key': 'diskEncryptionKey', 'type': 'KeyVaultAndSecretReference'}, + 'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyVaultAndKeyReference'}, + } + + def __init__(self, *, disk_encryption_key=None, key_encryption_key=None, **kwargs) -> None: + super(EncryptionSettingsElement, self).__init__(**kwargs) + self.disk_encryption_key = disk_encryption_key + self.key_encryption_key = key_encryption_key + + +class GrantAccessData(Model): + """Data used for requesting a SAS. + + All required parameters must be populated in order to send to Azure. + + :param access: Required. Possible values include: 'None', 'Read', 'Write' + :type access: str or ~azure.mgmt.compute.v2020_06_30.models.AccessLevel + :param duration_in_seconds: Required. Time duration in seconds until the + SAS access expires. + :type duration_in_seconds: int + """ + + _validation = { + 'access': {'required': True}, + 'duration_in_seconds': {'required': True}, + } + + _attribute_map = { + 'access': {'key': 'access', 'type': 'str'}, + 'duration_in_seconds': {'key': 'durationInSeconds', 'type': 'int'}, + } + + def __init__(self, *, access, duration_in_seconds: int, **kwargs) -> None: + super(GrantAccessData, self).__init__(**kwargs) + self.access = access + self.duration_in_seconds = duration_in_seconds + + +class ImageDiskReference(Model): + """The source image used for creating the disk. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A relative uri containing either a Platform Image + Repository or user image reference. + :type id: str + :param lun: If the disk is created from an image's data disk, this is an + index that indicates which of the data disks in the image to use. For OS + disks, this field is null. + :type lun: int + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'lun': {'key': 'lun', 'type': 'int'}, + } + + def __init__(self, *, id: str, lun: int=None, **kwargs) -> None: + super(ImageDiskReference, self).__init__(**kwargs) + self.id = id + self.lun = lun + + +class InnerError(Model): + """Inner error details. + + :param exceptiontype: The exception type. + :type exceptiontype: str + :param errordetail: The internal error message or exception dump. + :type errordetail: str + """ + + _attribute_map = { + 'exceptiontype': {'key': 'exceptiontype', 'type': 'str'}, + 'errordetail': {'key': 'errordetail', 'type': 'str'}, + } + + def __init__(self, *, exceptiontype: str=None, errordetail: str=None, **kwargs) -> None: + super(InnerError, self).__init__(**kwargs) + self.exceptiontype = exceptiontype + self.errordetail = errordetail + + +class KeyVaultAndKeyReference(Model): + """Key Vault Key Url and vault id of KeK, KeK is optional and when provided is + used to unwrap the encryptionKey. + + All required parameters must be populated in order to send to Azure. + + :param source_vault: Required. Resource id of the KeyVault containing the + key or secret + :type source_vault: ~azure.mgmt.compute.v2020_06_30.models.SourceVault + :param key_url: Required. 
Url pointing to a key or secret in KeyVault + :type key_url: str + """ + + _validation = { + 'source_vault': {'required': True}, + 'key_url': {'required': True}, + } + + _attribute_map = { + 'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'}, + 'key_url': {'key': 'keyUrl', 'type': 'str'}, + } + + def __init__(self, *, source_vault, key_url: str, **kwargs) -> None: + super(KeyVaultAndKeyReference, self).__init__(**kwargs) + self.source_vault = source_vault + self.key_url = key_url + + +class KeyVaultAndSecretReference(Model): + """Key Vault Secret Url and vault id of the encryption key . + + All required parameters must be populated in order to send to Azure. + + :param source_vault: Required. Resource id of the KeyVault containing the + key or secret + :type source_vault: ~azure.mgmt.compute.v2020_06_30.models.SourceVault + :param secret_url: Required. Url pointing to a key or secret in KeyVault + :type secret_url: str + """ + + _validation = { + 'source_vault': {'required': True}, + 'secret_url': {'required': True}, + } + + _attribute_map = { + 'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'}, + 'secret_url': {'key': 'secretUrl', 'type': 'str'}, + } + + def __init__(self, *, source_vault, secret_url: str, **kwargs) -> None: + super(KeyVaultAndSecretReference, self).__init__(**kwargs) + self.source_vault = source_vault + self.secret_url = secret_url + + +class PrivateEndpoint(Model): + """The Private Endpoint resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ARM identifier for Private Endpoint + :vartype id: str + """ + + _validation = { + 'id': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__(self, **kwargs) -> None: + super(PrivateEndpoint, self).__init__(**kwargs) + self.id = None + + +class PrivateEndpointConnection(Model): + """The Private Endpoint Connection resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :param private_endpoint: The resource of private end point. + :type private_endpoint: + ~azure.mgmt.compute.v2020_06_30.models.PrivateEndpoint + :param private_link_service_connection_state: Required. A collection of + information about the state of the connection between DiskAccess and + Virtual Network. + :type private_link_service_connection_state: + ~azure.mgmt.compute.v2020_06_30.models.PrivateLinkServiceConnectionState + :param provisioning_state: The provisioning state of the private endpoint + connection resource. 
Possible values include: 'Succeeded', 'Creating', + 'Deleting', 'Failed' + :type provisioning_state: str or + ~azure.mgmt.compute.v2020_06_30.models.PrivateEndpointConnectionProvisioningState + :ivar id: private endpoint connection Id + :vartype id: str + :ivar name: private endpoint connection name + :vartype name: str + :ivar type: private endpoint connection type + :vartype type: str + """ + + _validation = { + 'private_link_service_connection_state': {'required': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + } + + _attribute_map = { + 'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, + 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__(self, *, private_link_service_connection_state, private_endpoint=None, provisioning_state=None, **kwargs) -> None: + super(PrivateEndpointConnection, self).__init__(**kwargs) + self.private_endpoint = private_endpoint + self.private_link_service_connection_state = private_link_service_connection_state + self.provisioning_state = provisioning_state + self.id = None + self.name = None + self.type = None + + +class PrivateLinkResource(Model): + """A private link resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar group_id: The private link resource group id. + :vartype group_id: str + :ivar required_members: The private link resource required member names. + :vartype required_members: list[str] + :param required_zone_names: The private link resource DNS zone name. + :type required_zone_names: list[str] + :ivar id: private link resource Id + :vartype id: str + :ivar name: private link resource name + :vartype name: str + :ivar type: private link resource type + :vartype type: str + """ + + _validation = { + 'group_id': {'readonly': True}, + 'required_members': {'readonly': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + } + + _attribute_map = { + 'group_id': {'key': 'properties.groupId', 'type': 'str'}, + 'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'}, + 'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'}, + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__(self, *, required_zone_names=None, **kwargs) -> None: + super(PrivateLinkResource, self).__init__(**kwargs) + self.group_id = None + self.required_members = None + self.required_zone_names = required_zone_names + self.id = None + self.name = None + self.type = None + + +class PrivateLinkResourceListResult(Model): + """A list of private link resources. 
+ + :param value: Array of private link resources + :type value: + list[~azure.mgmt.compute.v2020_06_30.models.PrivateLinkResource] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[PrivateLinkResource]'}, + } + + def __init__(self, *, value=None, **kwargs) -> None: + super(PrivateLinkResourceListResult, self).__init__(**kwargs) + self.value = value + + +class PrivateLinkServiceConnectionState(Model): + """A collection of information about the state of the connection between + service consumer and provider. + + :param status: Indicates whether the connection has been + Approved/Rejected/Removed by the owner of the service. Possible values + include: 'Pending', 'Approved', 'Rejected' + :type status: str or + ~azure.mgmt.compute.v2020_06_30.models.PrivateEndpointServiceConnectionStatus + :param description: The reason for approval/rejection of the connection. + :type description: str + :param actions_required: A message indicating if changes on the service + provider require any updates on the consumer. + :type actions_required: str + """ + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'actions_required': {'key': 'actionsRequired', 'type': 'str'}, + } + + def __init__(self, *, status=None, description: str=None, actions_required: str=None, **kwargs) -> None: + super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) + self.status = status + self.description = description + self.actions_required = actions_required + + +class ShareInfoElement(Model): + """ShareInfoElement. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar vm_uri: A relative URI containing the ID of the VM that has the disk + attached. + :vartype vm_uri: str + """ + + _validation = { + 'vm_uri': {'readonly': True}, + } + + _attribute_map = { + 'vm_uri': {'key': 'vmUri', 'type': 'str'}, + } + + def __init__(self, **kwargs) -> None: + super(ShareInfoElement, self).__init__(**kwargs) + self.vm_uri = None + + +class Snapshot(Resource): + """Snapshot resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id + :vartype id: str + :ivar name: Resource name + :vartype name: str + :ivar type: Resource type + :vartype type: str + :param location: Required. Resource location + :type location: str + :param tags: Resource tags + :type tags: dict[str, str] + :ivar managed_by: Unused. Always Null. + :vartype managed_by: str + :param sku: + :type sku: ~azure.mgmt.compute.v2020_06_30.models.SnapshotSku + :ivar time_created: The time when the snapshot was created. + :vartype time_created: datetime + :param os_type: The Operating System type. Possible values include: + 'Windows', 'Linux' + :type os_type: str or + ~azure.mgmt.compute.v2020_06_30.models.OperatingSystemTypes + :param hyper_vgeneration: The hypervisor generation of the Virtual + Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2' + :type hyper_vgeneration: str or + ~azure.mgmt.compute.v2020_06_30.models.HyperVGeneration + :param creation_data: Required. Disk source information. CreationData + information cannot be changed after the disk has been created. + :type creation_data: ~azure.mgmt.compute.v2020_06_30.models.CreationData + :param disk_size_gb: If creationData.createOption is Empty, this field is + mandatory and it indicates the size of the disk to create. 
If this field
+ is present for updates or creation with other options, it indicates a
+ resize. Resizes are only allowed if the disk is not attached to a running
+ VM, and can only increase the disk's size.
+ :type disk_size_gb: int
+ :ivar disk_size_bytes: The size of the disk in bytes. This field is read
+ only.
+ :vartype disk_size_bytes: long
+ :param disk_state: The state of the snapshot. Possible values include:
+ 'Unattached', 'Attached', 'Reserved', 'ActiveSAS', 'ReadyToUpload',
+ 'ActiveUpload'
+ :type disk_state: str or ~azure.mgmt.compute.v2020_06_30.models.DiskState
+ :ivar unique_id: Unique Guid identifying the resource.
+ :vartype unique_id: str
+ :param encryption_settings_collection: Encryption settings collection used
+ by Azure Disk Encryption, can contain multiple encryption settings per
+ disk or snapshot.
+ :type encryption_settings_collection:
+ ~azure.mgmt.compute.v2020_06_30.models.EncryptionSettingsCollection
+ :ivar provisioning_state: The disk provisioning state.
+ :vartype provisioning_state: str
+ :param incremental: Whether a snapshot is incremental. Incremental
+ snapshots on the same disk occupy less space than full snapshots and can
+ be diffed.
+ :type incremental: bool
+ :param encryption: Encryption property can be used to encrypt data at rest
+ with customer managed keys or platform managed keys.
+ :type encryption: ~azure.mgmt.compute.v2020_06_30.models.Encryption
+ :param network_access_policy: Possible values include: 'AllowAll',
+ 'AllowPrivate', 'DenyAll'
+ :type network_access_policy: str or
+ ~azure.mgmt.compute.v2020_06_30.models.NetworkAccessPolicy
+ :param disk_access_id: ARM id of the DiskAccess resource for using private
+ endpoints on disks.
+ :type disk_access_id: str
+ """
+
+ _validation = {
+ 'id': {'readonly': True},
+ 'name': {'readonly': True},
+ 'type': {'readonly': True},
+ 'location': {'required': True},
+ 'managed_by': {'readonly': True},
+ 'time_created': {'readonly': True},
+ 'creation_data': {'required': True},
+ 'disk_size_bytes': {'readonly': True},
+ 'unique_id': {'readonly': True},
+ 'provisioning_state': {'readonly': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'id', 'type': 'str'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'type': {'key': 'type', 'type': 'str'},
+ 'location': {'key': 'location', 'type': 'str'},
+ 'tags': {'key': 'tags', 'type': '{str}'},
+ 'managed_by': {'key': 'managedBy', 'type': 'str'},
+ 'sku': {'key': 'sku', 'type': 'SnapshotSku'},
+ 'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
+ 'os_type': {'key': 'properties.osType', 'type': 'OperatingSystemTypes'},
+ 'hyper_vgeneration': {'key': 'properties.hyperVGeneration', 'type': 'str'},
+ 'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
+ 'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
+ 'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'},
+ 'disk_state': {'key': 'properties.diskState', 'type': 'str'},
+ 'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
+ 'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
+ 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
+ 'incremental': {'key': 'properties.incremental', 'type': 'bool'},
+ 'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
+ 'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'},
+ 'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'},
+ }
+
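(For orientation, the models above can be combined to describe a snapshot copied from an existing managed disk. This is only a rough sketch: the subscription, resource group, and disk names are placeholders, and the CreationData option names are assumed from this API version's models rather than shown in this change.)

    from azure.mgmt.compute.v2020_06_30 import models

    source_disk_id = (
        "/subscriptions/<subscription-id>/resourceGroups/<resource-group>"
        "/providers/Microsoft.Compute/disks/<disk-name>"
    )
    snapshot = models.Snapshot(
        location="westus2",
        creation_data=models.CreationData(
            create_option="Copy",            # assumed DiskCreateOption value
            source_resource_id=source_disk_id,
        ),
        sku=models.SnapshotSku(name="Standard_LRS"),
        incremental=True,                    # incremental snapshots can be diffed
        tags={"purpose": "example"},
    )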
+ def __init__(self, *, location: str, creation_data, tags=None, sku=None, os_type=None, hyper_vgeneration=None, disk_size_gb: int=None, disk_state=None, encryption_settings_collection=None, incremental: bool=None, encryption=None, network_access_policy=None, disk_access_id: str=None, **kwargs) -> None:
+ super(Snapshot, self).__init__(location=location, tags=tags, **kwargs)
+ self.managed_by = None
+ self.sku = sku
+ self.time_created = None
+ self.os_type = os_type
+ self.hyper_vgeneration = hyper_vgeneration
+ self.creation_data = creation_data
+ self.disk_size_gb = disk_size_gb
+ self.disk_size_bytes = None
+ self.disk_state = disk_state
+ self.unique_id = None
+ self.encryption_settings_collection = encryption_settings_collection
+ self.provisioning_state = None
+ self.incremental = incremental
+ self.encryption = encryption
+ self.network_access_policy = network_access_policy
+ self.disk_access_id = disk_access_id
+
+
+class SnapshotSku(Model):
+ """The snapshot sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
+
+ Variables are only populated by the server, and will be ignored when
+ sending a request.
+
+ :param name: The sku name. Possible values include: 'Standard_LRS',
+ 'Premium_LRS', 'Standard_ZRS'
+ :type name: str or
+ ~azure.mgmt.compute.v2020_06_30.models.SnapshotStorageAccountTypes
+ :ivar tier: The sku tier.
+ :vartype tier: str
+ """
+
+ _validation = {
+ 'tier': {'readonly': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'tier': {'key': 'tier', 'type': 'str'},
+ }
+
+ def __init__(self, *, name=None, **kwargs) -> None:
+ super(SnapshotSku, self).__init__(**kwargs)
+ self.name = name
+ self.tier = None
+
+
+class SnapshotUpdate(Model):
+ """Snapshot update resource.
+
+ :param os_type: The Operating System type. Possible values include:
+ 'Windows', 'Linux'
+ :type os_type: str or
+ ~azure.mgmt.compute.v2020_06_30.models.OperatingSystemTypes
+ :param disk_size_gb: If creationData.createOption is Empty, this field is
+ mandatory and it indicates the size of the disk to create. If this field
+ is present for updates or creation with other options, it indicates a
+ resize. Resizes are only allowed if the disk is not attached to a running
+ VM, and can only increase the disk's size.
+ :type disk_size_gb: int
+ :param encryption_settings_collection: Encryption settings collection used
+ by Azure Disk Encryption, can contain multiple encryption settings per
+ disk or snapshot.
+ :type encryption_settings_collection:
+ ~azure.mgmt.compute.v2020_06_30.models.EncryptionSettingsCollection
+ :param encryption: Encryption property can be used to encrypt data at rest
+ with customer managed keys or platform managed keys.
+ :type encryption: ~azure.mgmt.compute.v2020_06_30.models.Encryption
+ :param network_access_policy: Possible values include: 'AllowAll',
+ 'AllowPrivate', 'DenyAll'
+ :type network_access_policy: str or
+ ~azure.mgmt.compute.v2020_06_30.models.NetworkAccessPolicy
+ :param disk_access_id: ARM id of the DiskAccess resource for using private
+ endpoints on disks.
+ :type disk_access_id: str + :param tags: Resource tags + :type tags: dict[str, str] + :param sku: + :type sku: ~azure.mgmt.compute.v2020_06_30.models.SnapshotSku + """ + + _attribute_map = { + 'os_type': {'key': 'properties.osType', 'type': 'OperatingSystemTypes'}, + 'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'}, + 'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'}, + 'encryption': {'key': 'properties.encryption', 'type': 'Encryption'}, + 'network_access_policy': {'key': 'properties.networkAccessPolicy', 'type': 'str'}, + 'disk_access_id': {'key': 'properties.diskAccessId', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'sku': {'key': 'sku', 'type': 'SnapshotSku'}, + } + + def __init__(self, *, os_type=None, disk_size_gb: int=None, encryption_settings_collection=None, encryption=None, network_access_policy=None, disk_access_id: str=None, tags=None, sku=None, **kwargs) -> None: + super(SnapshotUpdate, self).__init__(**kwargs) + self.os_type = os_type + self.disk_size_gb = disk_size_gb + self.encryption_settings_collection = encryption_settings_collection + self.encryption = encryption + self.network_access_policy = network_access_policy + self.disk_access_id = disk_access_id + self.tags = tags + self.sku = sku + + +class SourceVault(Model): + """The vault id is an Azure Resource Manager Resource id in the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}. + + :param id: Resource Id + :type id: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, **kwargs) -> None: + super(SourceVault, self).__init__(**kwargs) + self.id = id diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_paged_models.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_paged_models.py new file mode 100644 index 000000000000..148c66a73aff --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/models/_paged_models.py @@ -0,0 +1,79 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class DiskPaged(Paged): + """ + A paging container for iterating over a list of :class:`Disk ` object + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[Disk]'} + } + + def __init__(self, *args, **kwargs): + + super(DiskPaged, self).__init__(*args, **kwargs) +class SnapshotPaged(Paged): + """ + A paging container for iterating over a list of :class:`Snapshot ` object + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[Snapshot]'} + } + + def __init__(self, *args, **kwargs): + + super(SnapshotPaged, self).__init__(*args, **kwargs) +class DiskEncryptionSetPaged(Paged): + """ + A paging container for iterating over a list of :class:`DiskEncryptionSet ` object + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[DiskEncryptionSet]'} + } + + def __init__(self, *args, **kwargs): + + super(DiskEncryptionSetPaged, self).__init__(*args, **kwargs) +class StrPaged(Paged): + """ + A paging container for iterating over a list of str object + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[str]'} + } + + def __init__(self, *args, **kwargs): + + super(StrPaged, self).__init__(*args, **kwargs) +class DiskAccessPaged(Paged): + """ + A paging container for iterating over a list of :class:`DiskAccess ` object + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[DiskAccess]'} + } + + def __init__(self, *args, **kwargs): + + super(DiskAccessPaged, self).__init__(*args, **kwargs) diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/__init__.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/__init__.py new file mode 100644 index 000000000000..2e8301f5ddcd --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/__init__.py @@ -0,0 +1,22 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from ._disks_operations import DisksOperations +from ._snapshots_operations import SnapshotsOperations +from ._disk_encryption_sets_operations import DiskEncryptionSetsOperations +from ._disk_accesses_operations import DiskAccessesOperations + +__all__ = [ + 'DisksOperations', + 'SnapshotsOperations', + 'DiskEncryptionSetsOperations', + 'DiskAccessesOperations', +] diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_disk_accesses_operations.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_disk_accesses_operations.py new file mode 100644 index 000000000000..63e39503b548 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_disk_accesses_operations.py @@ -0,0 +1,606 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse +from msrestazure.azure_exceptions import CloudError +from msrest.polling import LROPoller, NoPolling +from msrestazure.polling.arm_polling import ARMPolling + +from .. import models + + +class DiskAccessesOperations(object): + """DiskAccessesOperations operations. + + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client Api Version. Constant value: "2020-06-30". 
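(As the class docstring notes, this operations class is reached through a client instance rather than instantiated directly. A minimal sketch follows; the credentials object, subscription ID, resource group, and disk access name are all placeholders, and any msrest-compatible credentials object accepted by ComputeManagementClient would do.)

    from azure.mgmt.compute import ComputeManagementClient

    credentials = ...  # placeholder, e.g. a ServicePrincipalCredentials instance
    client = ComputeManagementClient(credentials, "<subscription-id>")

    # disk_accesses resolves to this DiskAccessesOperations class when the
    # profile maps it to api-version 2020-06-30.
    poller = client.disk_accesses.create_or_update(
        "<resource-group>", "<disk-access-name>", "westus2"
    )
    disk_access = poller.result()  # LROPoller that yields a DiskAccess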
+ """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2020-06-30" + + self.config = config + + + def _create_or_update_initial( + self, resource_group_name, disk_access_name, location, tags=None, custom_headers=None, raw=False, **operation_config): + disk_access = models.DiskAccess(location=location, tags=tags) + + # Construct URL + url = self.create_or_update.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskAccessName': self._serialize.url("disk_access_name", disk_access_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(disk_access, 'DiskAccess') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('DiskAccess', response) + if response.status_code == 202: + deserialized = self._deserialize('DiskAccess', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def create_or_update( + self, resource_group_name, disk_access_name, location, tags=None, custom_headers=None, raw=False, polling=True, **operation_config): + """Creates or updates a disk access resource. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is + being created. The name can't be changed after the disk encryption set + is created. Supported characters for the name are a-z, A-Z, 0-9 and _. + The maximum name length is 80 characters. 
+ :type disk_access_name: str + :param location: Resource location + :type location: str + :param tags: Resource tags + :type tags: dict[str, str] + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns DiskAccess or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2020_06_30.models.DiskAccess] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2020_06_30.models.DiskAccess]] + :raises: :class:`CloudError` + """ + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + location=location, + tags=tags, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('DiskAccess', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'} + + + def _update_initial( + self, resource_group_name, disk_access_name, tags=None, custom_headers=None, raw=False, **operation_config): + disk_access = models.DiskAccessUpdate(tags=tags) + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskAccessName': self._serialize.url("disk_access_name", disk_access_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(disk_access, 'DiskAccessUpdate') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = 
response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('DiskAccess', response) + if response.status_code == 202: + deserialized = self._deserialize('DiskAccess', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def update( + self, resource_group_name, disk_access_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config): + """Updates (patches) a disk access resource. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is + being created. The name can't be changed after the disk encryption set + is created. Supported characters for the name are a-z, A-Z, 0-9 and _. + The maximum name length is 80 characters. + :type disk_access_name: str + :param tags: Resource tags + :type tags: dict[str, str] + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns DiskAccess or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2020_06_30.models.DiskAccess] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2020_06_30.models.DiskAccess]] + :raises: :class:`CloudError` + """ + raw_result = self._update_initial( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + tags=tags, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('DiskAccess', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'} + + def get( + self, resource_group_name, disk_access_name, custom_headers=None, raw=False, **operation_config): + """Gets information about a disk access resource. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is + being created. The name can't be changed after the disk encryption set + is created. Supported characters for the name are a-z, A-Z, 0-9 and _. + The maximum name length is 80 characters. + :type disk_access_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: DiskAccess or ClientRawResponse if raw=true + :rtype: ~azure.mgmt.compute.v2020_06_30.models.DiskAccess or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskAccessName': self._serialize.url("disk_access_name", disk_access_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('DiskAccess', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'} + + + def _delete_initial( + self, resource_group_name, disk_access_name, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskAccessName': self._serialize.url("disk_access_name", disk_access_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202, 204]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + def delete( + self, resource_group_name, 
disk_access_name, custom_headers=None, raw=False, polling=True, **operation_config): + """Deletes a disk access resource. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is + being created. The name can't be changed after the disk encryption set + is created. Supported characters for the name are a-z, A-Z, 0-9 and _. + The maximum name length is 80 characters. + :type disk_access_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True + :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] + :raises: :class:`CloudError` + """ + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'} + + def list_by_resource_group( + self, resource_group_name, custom_headers=None, raw=False, **operation_config): + """Lists all the disk access resources under a resource group. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
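(The pager returned by this operation can be consumed with a plain for loop, as in the sketch below, reusing the client from the earlier sketch; the resource group name is a placeholder and the printed attributes are just illustrative DiskAccess fields.)

    for disk_access in client.disk_accesses.list_by_resource_group("<resource-group>"):
        # Each item is a deserialized DiskAccess; following nextLink pages
        # is handled by the DiskAccessPaged container defined in this change.
        print(disk_access.name, disk_access.provisioning_state)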
+ :return: An iterator like instance of DiskAccess + :rtype: + ~azure.mgmt.compute.v2020_06_30.models.DiskAccessPaged[~azure.mgmt.compute.v2020_06_30.models.DiskAccess] + :raises: :class:`CloudError` + """ + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list_by_resource_group.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + return response + + # Deserialize response + header_dict = None + if raw: + header_dict = {} + deserialized = models.DiskAccessPaged(internal_paging, self._deserialize.dependencies, header_dict) + + return deserialized + list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses'} + + def list( + self, custom_headers=None, raw=False, **operation_config): + """Lists all the disk access resources under a subscription. + + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of DiskAccess + :rtype: + ~azure.mgmt.compute.v2020_06_30.models.DiskAccessPaged[~azure.mgmt.compute.v2020_06_30.models.DiskAccess] + :raises: :class:`CloudError` + """ + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + return response + + # Deserialize response + header_dict = None + if raw: + header_dict = {} + deserialized = models.DiskAccessPaged(internal_paging, self._deserialize.dependencies, header_dict) + + return deserialized + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskAccesses'} + + def get_private_link_resources( + self, resource_group_name, disk_access_name, custom_headers=None, raw=False, **operation_config): + """Gets the private link resources possible under disk access resource. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is + being created. The name can't be changed after the disk encryption set + is created. Supported characters for the name are a-z, A-Z, 0-9 and _. + The maximum name length is 80 characters. + :type disk_access_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
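(A rough usage sketch for this operation, again reusing the client from the earlier sketch; the names are placeholders, and the printed attributes come from the PrivateLinkResource model defined earlier in this change.)

    resources = client.disk_accesses.get_private_link_resources(
        "<resource-group>", "<disk-access-name>"
    )
    for link_resource in resources.value or []:
        print(link_resource.group_id, link_resource.required_members)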
+ :return: PrivateLinkResourceListResult or ClientRawResponse if + raw=true + :rtype: + ~azure.mgmt.compute.v2020_06_30.models.PrivateLinkResourceListResult + or ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get_private_link_resources.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskAccessName': self._serialize.url("disk_access_name", disk_access_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('PrivateLinkResourceListResult', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_private_link_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateLinkResources'} diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_disk_encryption_sets_operations.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_disk_encryption_sets_operations.py new file mode 100644 index 000000000000..51bcbc2d3fe2 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_disk_encryption_sets_operations.py @@ -0,0 +1,611 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse +from msrestazure.azure_exceptions import CloudError +from msrest.polling import LROPoller, NoPolling +from msrestazure.polling.arm_polling import ARMPolling + +from .. import models + + +class DiskEncryptionSetsOperations(object): + """DiskEncryptionSetsOperations operations. + + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + + :param client: Client for service requests. 
+ :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client Api Version. Constant value: "2020-06-30". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2020-06-30" + + self.config = config + + + def _create_or_update_initial( + self, resource_group_name, disk_encryption_set_name, disk_encryption_set, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.create_or_update.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(disk_encryption_set, 'DiskEncryptionSet') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('DiskEncryptionSet', response) + if response.status_code == 202: + deserialized = self._deserialize('DiskEncryptionSet', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def create_or_update( + self, resource_group_name, disk_encryption_set_name, disk_encryption_set, custom_headers=None, raw=False, polling=True, **operation_config): + """Creates or updates a disk encryption set. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set + that is being created. The name can't be changed after the disk + encryption set is created. Supported characters for the name are a-z, + A-Z, 0-9 and _. The maximum name length is 80 characters. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the + body of the Put disk encryption set operation. 
+ :type disk_encryption_set: + ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns DiskEncryptionSet or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet]] + :raises: :class:`CloudError` + """ + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + disk_encryption_set=disk_encryption_set, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('DiskEncryptionSet', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} + + + def _update_initial( + self, resource_group_name, disk_encryption_set_name, disk_encryption_set, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(disk_encryption_set, 'DiskEncryptionSetUpdate') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = 
response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('DiskEncryptionSet', response) + if response.status_code == 202: + deserialized = self._deserialize('DiskEncryptionSet', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def update( + self, resource_group_name, disk_encryption_set_name, disk_encryption_set, custom_headers=None, raw=False, polling=True, **operation_config): + """Updates (patches) a disk encryption set. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set + that is being created. The name can't be changed after the disk + encryption set is created. Supported characters for the name are a-z, + A-Z, 0-9 and _. The maximum name length is 80 characters. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the + body of the Patch disk encryption set operation. + :type disk_encryption_set: + ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetUpdate + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns DiskEncryptionSet or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet]] + :raises: :class:`CloudError` + """ + raw_result = self._update_initial( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + disk_encryption_set=disk_encryption_set, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('DiskEncryptionSet', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} + + def get( + self, resource_group_name, disk_encryption_set_name, custom_headers=None, raw=False, **operation_config): + """Gets information about a disk encryption set. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set + that is being created. The name can't be changed after the disk + encryption set is created. Supported characters for the name are a-z, + A-Z, 0-9 and _. The maximum name length is 80 characters. 
+ :type disk_encryption_set_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: DiskEncryptionSet or ClientRawResponse if raw=true + :rtype: ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('DiskEncryptionSet', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} + + + def _delete_initial( + self, resource_group_name, disk_encryption_set_name, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = 
self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202, 204]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + def delete( + self, resource_group_name, disk_encryption_set_name, custom_headers=None, raw=False, polling=True, **operation_config): + """Deletes a disk encryption set. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set + that is being created. The name can't be changed after the disk + encryption set is created. Supported characters for the name are a-z, + A-Z, 0-9 and _. The maximum name length is 80 characters. + :type disk_encryption_set_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True + :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] + :raises: :class:`CloudError` + """ + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} + + def list_by_resource_group( + self, resource_group_name, custom_headers=None, raw=False, **operation_config): + """Lists all the disk encryption sets under a resource group. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of DiskEncryptionSet + :rtype: + ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetPaged[~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet] + :raises: :class:`CloudError` + """ + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list_by_resource_group.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + return response + + # Deserialize response + header_dict = None + if raw: + header_dict = {} + deserialized = models.DiskEncryptionSetPaged(internal_paging, self._deserialize.dependencies, header_dict) + + return deserialized + list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets'} + + def list( + self, custom_headers=None, raw=False, **operation_config): + """Lists all the disk encryption sets under a subscription. + + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of DiskEncryptionSet + :rtype: + ~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSetPaged[~azure.mgmt.compute.v2020_06_30.models.DiskEncryptionSet] + :raises: :class:`CloudError` + """ + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + return response + + # Deserialize response + header_dict = None + if raw: + header_dict = {} + deserialized = models.DiskEncryptionSetPaged(internal_paging, self._deserialize.dependencies, header_dict) + + return deserialized + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets'} + + def list_associated_resources( + self, resource_group_name, disk_encryption_set_name, custom_headers=None, raw=False, **operation_config): + """Lists all resources that are encrypted with this disk encryption set. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set + that is being created. The name can't be changed after the disk + encryption set is created. Supported characters for the name are a-z, + A-Z, 0-9 and _. The maximum name length is 80 characters. + :type disk_encryption_set_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
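A quick sketch of consuming the paged list operation above (an editorial illustration, not generated code); iterating the returned DiskEncryptionSetPaged object follows nextLink transparently.

def print_disk_encryption_sets(client):
    # Each iteration step may trigger another GET against the nextLink URL.
    for des in client.disk_encryption_sets.list():
        print(des.name, des.location)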
+ :return: An iterator like instance of str + :rtype: ~azure.mgmt.compute.v2020_06_30.models.StrPaged[str] + :raises: :class:`CloudError` + """ + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list_associated_resources.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskEncryptionSetName': self._serialize.url("disk_encryption_set_name", disk_encryption_set_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + return response + + # Deserialize response + header_dict = None + if raw: + header_dict = {} + deserialized = models.StrPaged(internal_paging, self._deserialize.dependencies, header_dict) + + return deserialized + list_associated_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}/associatedResources'} diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_disks_operations.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_disks_operations.py new file mode 100644 index 000000000000..619072628f7d --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_disks_operations.py @@ -0,0 +1,727 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse +from msrestazure.azure_exceptions import CloudError +from msrest.polling import LROPoller, NoPolling +from msrestazure.polling.arm_polling import ARMPolling + +from .. import models + + +class DisksOperations(object): + """DisksOperations operations. + + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + + :param client: Client for service requests. 
+ :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client Api Version. Constant value: "2020-06-30". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2020-06-30" + + self.config = config + + + def _create_or_update_initial( + self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.create_or_update.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskName': self._serialize.url("disk_name", disk_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(disk, 'Disk') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('Disk', response) + if response.status_code == 202: + deserialized = self._deserialize('Disk', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def create_or_update( + self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, polling=True, **operation_config): + """Creates or updates a disk. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. + The name can't be changed after the disk is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The maximum name + length is 80 characters. + :type disk_name: str + :param disk: Disk object supplied in the body of the Put disk + operation. 
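The public create_or_update wrapper documented below pairs with this initial call. As a hedged illustration only: the Disk, DiskSku and CreationData model classes are assumed to be importable from this version's models package, and the region and size values are placeholders.

from azure.mgmt.compute.v2020_06_30.models import CreationData, Disk, DiskSku

def create_empty_disk(client, resource_group, disk_name):
    disk = Disk(
        location="westus2",                          # placeholder region
        sku=DiskSku(name="Premium_LRS"),
        creation_data=CreationData(create_option="Empty"),
        disk_size_gb=128,
    )
    # Returns LROPoller[Disk]; result() yields the provisioned Disk model.
    return client.disks.create_or_update(resource_group, disk_name, disk).result()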
+ :type disk: ~azure.mgmt.compute.v2020_06_30.models.Disk + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns Disk or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2020_06_30.models.Disk] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2020_06_30.models.Disk]] + :raises: :class:`CloudError` + """ + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + disk_name=disk_name, + disk=disk, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('Disk', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} + + + def _update_initial( + self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskName': self._serialize.url("disk_name", disk_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(disk, 'DiskUpdate') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('Disk', response) + if response.status_code == 202: + deserialized = self._deserialize('Disk', response) + + if raw: + 
client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def update( + self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, polling=True, **operation_config): + """Updates (patches) a disk. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. + The name can't be changed after the disk is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The maximum name + length is 80 characters. + :type disk_name: str + :param disk: Disk object supplied in the body of the Patch disk + operation. + :type disk: ~azure.mgmt.compute.v2020_06_30.models.DiskUpdate + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns Disk or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2020_06_30.models.Disk] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2020_06_30.models.Disk]] + :raises: :class:`CloudError` + """ + raw_result = self._update_initial( + resource_group_name=resource_group_name, + disk_name=disk_name, + disk=disk, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('Disk', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} + + def get( + self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config): + """Gets information about a disk. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. + The name can't be changed after the disk is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The maximum name + length is 80 characters. + :type disk_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
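A small sketch of the PATCH-based update shown above, assuming DiskUpdate exposes a disk_size_gb field as in earlier disk API versions; note that the service generally requires the disk to be unattached (or its VM deallocated) before a resize succeeds.

from azure.mgmt.compute.v2020_06_30.models import DiskUpdate

def resize_disk(client, resource_group, disk_name, new_size_gb):
    update = DiskUpdate(disk_size_gb=new_size_gb)   # assumed field, see note above
    return client.disks.update(resource_group, disk_name, update).result()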
+ :return: Disk or ClientRawResponse if raw=true + :rtype: ~azure.mgmt.compute.v2020_06_30.models.Disk or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskName': self._serialize.url("disk_name", disk_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('Disk', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} + + + def _delete_initial( + self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskName': self._serialize.url("disk_name", disk_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202, 204]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + def delete( + self, resource_group_name, disk_name, custom_headers=None, raw=False, polling=True, **operation_config): + 
"""Deletes a disk. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. + The name can't be changed after the disk is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The maximum name + length is 80 characters. + :type disk_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True + :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] + :raises: :class:`CloudError` + """ + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + disk_name=disk_name, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} + + def list_by_resource_group( + self, resource_group_name, custom_headers=None, raw=False, **operation_config): + """Lists all the disks under a resource group. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of Disk + :rtype: + ~azure.mgmt.compute.v2020_06_30.models.DiskPaged[~azure.mgmt.compute.v2020_06_30.models.Disk] + :raises: :class:`CloudError` + """ + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list_by_resource_group.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + return response + + # Deserialize response + header_dict = None + if raw: + header_dict = {} + deserialized = models.DiskPaged(internal_paging, self._deserialize.dependencies, header_dict) + + return deserialized + list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks'} + + def list( + self, custom_headers=None, raw=False, **operation_config): + """Lists all the disks under a subscription. + + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
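Consuming the resource-group listing above might look like this (names hypothetical):

def managed_disk_names(client, resource_group):
    # DiskPaged is lazy; the comprehension walks every page in turn.
    return [disk.name for disk in client.disks.list_by_resource_group(resource_group)]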
+ :return: An iterator like instance of Disk + :rtype: + ~azure.mgmt.compute.v2020_06_30.models.DiskPaged[~azure.mgmt.compute.v2020_06_30.models.Disk] + :raises: :class:`CloudError` + """ + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + return response + + # Deserialize response + header_dict = None + if raw: + header_dict = {} + deserialized = models.DiskPaged(internal_paging, self._deserialize.dependencies, header_dict) + + return deserialized + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks'} + + + def _grant_access_initial( + self, resource_group_name, disk_name, access, duration_in_seconds, custom_headers=None, raw=False, **operation_config): + grant_access_data = models.GrantAccessData(access=access, duration_in_seconds=duration_in_seconds) + + # Construct URL + url = self.grant_access.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskName': self._serialize.url("disk_name", disk_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(grant_access_data, 'GrantAccessData') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if 
response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('AccessUri', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def grant_access( + self, resource_group_name, disk_name, access, duration_in_seconds, custom_headers=None, raw=False, polling=True, **operation_config): + """Grants access to a disk. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. + The name can't be changed after the disk is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The maximum name + length is 80 characters. + :type disk_name: str + :param access: Possible values include: 'None', 'Read', 'Write' + :type access: str or + ~azure.mgmt.compute.v2020_06_30.models.AccessLevel + :param duration_in_seconds: Time duration in seconds until the SAS + access expires. + :type duration_in_seconds: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns AccessUri or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2020_06_30.models.AccessUri] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2020_06_30.models.AccessUri]] + :raises: :class:`CloudError` + """ + raw_result = self._grant_access_initial( + resource_group_name=resource_group_name, + disk_name=disk_name, + access=access, + duration_in_seconds=duration_in_seconds, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('AccessUri', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'} + + + def _revoke_access_initial( + self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.revoke_access.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'diskName': self._serialize.url("disk_name", disk_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + def revoke_access( + self, resource_group_name, disk_name, custom_headers=None, raw=False, polling=True, **operation_config): + """Revokes access to a disk. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. + The name can't be changed after the disk is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The maximum name + length is 80 characters. + :type disk_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True + :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] + :raises: :class:`CloudError` + """ + raw_result = self._revoke_access_initial( + resource_group_name=resource_group_name, + disk_name=disk_name, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'} diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_snapshots_operations.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_snapshots_operations.py new file mode 100644 index 000000000000..f5641dbca17e --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/operations/_snapshots_operations.py @@ -0,0 +1,727 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse +from msrestazure.azure_exceptions import CloudError +from msrest.polling import LROPoller, NoPolling +from msrestazure.polling.arm_polling import ARMPolling + +from .. import models + + +class SnapshotsOperations(object): + """SnapshotsOperations operations. + + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client Api Version. Constant value: "2020-06-30". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2020-06-30" + + self.config = config + + + def _create_or_update_initial( + self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.create_or_update.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(snapshot, 'Snapshot') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('Snapshot', response) + if response.status_code == 202: + deserialized = self._deserialize('Snapshot', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def create_or_update( + self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, polling=True, **operation_config): + """Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. 
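For the snapshot create_or_update being documented here, a hedged sketch that copies an existing managed disk; the Snapshot and CreationData model names are assumed from this version's models package, and the location value is a placeholder.

from azure.mgmt.compute.v2020_06_30.models import CreationData, Snapshot

def snapshot_disk(client, resource_group, snapshot_name, source_disk_id):
    snapshot = Snapshot(
        location="westus2",  # placeholder; must match the source disk's region
        creation_data=CreationData(create_option="Copy",
                                   source_resource_id=source_disk_id),
    )
    return client.snapshots.create_or_update(resource_group, snapshot_name, snapshot).result()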
+ :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. + The name can't be changed after the snapshot is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The max name length + is 80 characters. + :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Put disk + operation. + :type snapshot: ~azure.mgmt.compute.v2020_06_30.models.Snapshot + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns Snapshot or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2020_06_30.models.Snapshot] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2020_06_30.models.Snapshot]] + :raises: :class:`CloudError` + """ + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + snapshot=snapshot, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('Snapshot', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} + + + def _update_initial( + self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(snapshot, 'SnapshotUpdate') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, 
body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('Snapshot', response) + if response.status_code == 202: + deserialized = self._deserialize('Snapshot', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def update( + self, resource_group_name, snapshot_name, snapshot, custom_headers=None, raw=False, polling=True, **operation_config): + """Updates (patches) a snapshot. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. + The name can't be changed after the snapshot is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The max name length + is 80 characters. + :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Patch + snapshot operation. + :type snapshot: ~azure.mgmt.compute.v2020_06_30.models.SnapshotUpdate + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns Snapshot or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2020_06_30.models.Snapshot] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2020_06_30.models.Snapshot]] + :raises: :class:`CloudError` + """ + raw_result = self._update_initial( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + snapshot=snapshot, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('Snapshot', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} + + def get( + self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config): + """Gets information about a snapshot. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. + The name can't be changed after the snapshot is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The max name length + is 80 characters. 
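A short example of the snapshot update operation above, assuming SnapshotUpdate carries a tags field as in prior API versions; only the fields set on the update object are sent in the PATCH body.

from azure.mgmt.compute.v2020_06_30.models import SnapshotUpdate

def tag_snapshot(client, resource_group, snapshot_name):
    update = SnapshotUpdate(tags={"purpose": "backup"})  # assumed field, see note above
    return client.snapshots.update(resource_group, snapshot_name, update).result()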
+ :type snapshot_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: Snapshot or ClientRawResponse if raw=true + :rtype: ~azure.mgmt.compute.v2020_06_30.models.Snapshot or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('Snapshot', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} + + + def _delete_initial( + self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202, 204]: + exp = 
CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + def delete( + self, resource_group_name, snapshot_name, custom_headers=None, raw=False, polling=True, **operation_config): + """Deletes a snapshot. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. + The name can't be changed after the snapshot is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The max name length + is 80 characters. + :type snapshot_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True + :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] + :raises: :class:`CloudError` + """ + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} + + def list_by_resource_group( + self, resource_group_name, custom_headers=None, raw=False, **operation_config): + """Lists snapshots under a resource group. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of Snapshot + :rtype: + ~azure.mgmt.compute.v2020_06_30.models.SnapshotPaged[~azure.mgmt.compute.v2020_06_30.models.Snapshot] + :raises: :class:`CloudError` + """ + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list_by_resource_group.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + return response + + # Deserialize response + header_dict = None + if raw: + header_dict = {} + deserialized = models.SnapshotPaged(internal_paging, self._deserialize.dependencies, header_dict) + + return deserialized + list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots'} + + def list( + self, custom_headers=None, raw=False, **operation_config): + """Lists snapshots under a subscription. + + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
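Sketch of iterating the subscription-wide snapshot listing documented below, filtering on an attribute assumed to exist on the Snapshot model (disk_size_gb):

def large_snapshots(client, min_size_gb=100):
    # snapshots.list() pages through every snapshot in the subscription.
    return [s for s in client.snapshots.list() if (s.disk_size_gb or 0) > min_size_gb]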
+ :return: An iterator like instance of Snapshot + :rtype: + ~azure.mgmt.compute.v2020_06_30.models.SnapshotPaged[~azure.mgmt.compute.v2020_06_30.models.Snapshot] + :raises: :class:`CloudError` + """ + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + return response + + # Deserialize response + header_dict = None + if raw: + header_dict = {} + deserialized = models.SnapshotPaged(internal_paging, self._deserialize.dependencies, header_dict) + + return deserialized + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots'} + + + def _grant_access_initial( + self, resource_group_name, snapshot_name, access, duration_in_seconds, custom_headers=None, raw=False, **operation_config): + grant_access_data = models.GrantAccessData(access=access, duration_in_seconds=duration_in_seconds) + + # Construct URL + url = self.grant_access.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = self._serialize.body(grant_access_data, 'GrantAccessData') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, 
**operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('AccessUri', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def grant_access( + self, resource_group_name, snapshot_name, access, duration_in_seconds, custom_headers=None, raw=False, polling=True, **operation_config): + """Grants access to a snapshot. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. + The name can't be changed after the snapshot is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The max name length + is 80 characters. + :type snapshot_name: str + :param access: Possible values include: 'None', 'Read', 'Write' + :type access: str or + ~azure.mgmt.compute.v2020_06_30.models.AccessLevel + :param duration_in_seconds: Time duration in seconds until the SAS + access expires. + :type duration_in_seconds: int + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns AccessUri or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2020_06_30.models.AccessUri] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2020_06_30.models.AccessUri]] + :raises: :class:`CloudError` + """ + raw_result = self._grant_access_initial( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + access=access, + duration_in_seconds=duration_in_seconds, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('AccessUri', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess'} + + + def _revoke_access_initial( + self, resource_group_name, snapshot_name, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.revoke_access.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'snapshotName': self._serialize.url("snapshot_name", snapshot_name, 'str') + } + url = self._client.format_url(url, 
**path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + def revoke_access( + self, resource_group_name, snapshot_name, custom_headers=None, raw=False, polling=True, **operation_config): + """Revokes access to a snapshot. + + :param resource_group_name: The name of the resource group. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. + The name can't be changed after the snapshot is created. Supported + characters for the name are a-z, A-Z, 0-9 and _. The max name length + is 80 characters. + :type snapshot_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True + :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] + :raises: :class:`CloudError` + """ + raw_result = self._revoke_access_initial( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess'} diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/version.py new file mode 100644 index 000000000000..3b757a1a8773 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/version.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+VERSION = "2020-06-30"
+
diff --git a/sdk/compute/azure-mgmt-compute/setup.py b/sdk/compute/azure-mgmt-compute/setup.py
index cfcaa1edbaea..9c66a4d27b46
--- a/sdk/compute/azure-mgmt-compute/setup.py
+++ b/sdk/compute/azure-mgmt-compute/setup.py
@@ -36,7 +36,7 @@
     pass
 # Version extraction inspired from 'requests'
-with open(os.path.join(package_folder_path, 'version.py')
+with open(os.path.join(package_folder_path, 'version.py') if os.path.exists(os.path.join(package_folder_path, 'version.py')) else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
+    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
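Editorial aside, not part of the diff above: a minimal usage sketch of the regenerated v2020_06_30 snapshot operations via the snapshots operation group. The credential type and every ID, resource group, and snapshot name below are placeholders assumed for illustration.

# Illustrative sketch only; all names and IDs are placeholders, not values from this change.
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient

credentials = ServicePrincipalCredentials(client_id='<client-id>', secret='<secret>', tenant='<tenant-id>')
client = ComputeManagementClient(credentials, '<subscription-id>')

# list / list_by_resource_group return a paged iterator of Snapshot models.
for snapshot in client.snapshots.list_by_resource_group('<resource-group>'):
    print(snapshot.name)

# grant_access / revoke_access are long-running operations wrapped in an LROPoller.
poller = client.snapshots.grant_access('<resource-group>', '<snapshot-name>', access='Read', duration_in_seconds=3600)
access_uri = poller.result()  # AccessUri; access_uri.access_sas holds the temporary SAS URL
client.snapshots.revoke_access('<resource-group>', '<snapshot-name>').result()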