diff --git a/src/storagecache/HISTORY.rst b/src/storagecache/HISTORY.rst new file mode 100644 index 00000000000..1c139576ba0 --- /dev/null +++ b/src/storagecache/HISTORY.rst @@ -0,0 +1,8 @@ +.. :changelog: + +Release History +=============== + +0.1.0 +++++++ +* Initial release. diff --git a/src/storagecache/README.md b/src/storagecache/README.md new file mode 100644 index 00000000000..13068c82da7 --- /dev/null +++ b/src/storagecache/README.md @@ -0,0 +1,88 @@ +# Azure CLI storagecache Extension # +This is the extension for storagecache + +### How to use ### +Install this extension using the below CLI command +``` +az extension add --name storagecache +``` + +### Included Features ### +#### storagecache sku #### +##### List ##### +``` +az storagecache sku list +``` +#### storagecache usage-model #### +##### List ##### +``` +az storagecache usage-model list +``` +#### storagecache asc-operation #### +##### Show ##### +``` +az storagecache asc-operation show --operation-id "testoperationid" --location "West US" +``` +#### storagecache cache #### +##### Create ##### +``` +az storagecache cache create --location "westus" --cache-size-gb 3072 \ + --subnet "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" \ + --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup" +``` +##### Show ##### +``` +az storagecache cache show --cache-name "sc1" --resource-group "scgroup" +``` +##### List ##### +``` +az storagecache cache list --resource-group "scgroup" +``` +##### Update ##### +``` +az storagecache cache update --location "westus" --cache-size-gb 3072 \ + --subnet "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" \ + --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup" +``` +##### Flush 
##### +``` +az storagecache cache flush --cache-name "sc" --resource-group "scgroup" +``` +##### Start ##### +``` +az storagecache cache start --cache-name "sc" --resource-group "scgroup" +``` +##### Stop ##### +``` +az storagecache cache stop --cache-name "sc" --resource-group "scgroup" +``` +##### Upgrade-firmware ##### +``` +az storagecache cache upgrade-firmware --cache-name "sc1" --resource-group "scgroup" +``` +##### Delete ##### +``` +az storagecache cache delete --cache-name "sc" --resource-group "scgroup" +``` +#### storagecache storage-target #### +##### Create ##### +``` +az storagecache storage-target create --cache-name "sc1" --resource-group "scgroup" --name "st1" \ + --junctions namespace-path="/path/on/cache" nfs-export="exp1" target-path="/path/on/exp1" \ + --junctions namespace-path="/path2/on/cache" nfs-export="exp2" target-path="/path2/on/exp2" \ + --nfs3 target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" + +az storagecache storage-target wait --created --resource-group "{rg}" --name "{myStorageTarget}" +``` +##### Show ##### +``` +az storagecache storage-target show --cache-name "sc1" --resource-group "scgroup" --name "st1" +``` +##### List ##### +``` +az storagecache storage-target list --cache-name "sc1" --resource-group "scgroup" +``` +##### Delete ##### +``` +az storagecache storage-target delete --cache-name "sc1" --resource-group "scgroup" --name "st1" +``` \ No newline at end of file diff --git a/src/storagecache/azext_storagecache/__init__.py b/src/storagecache/azext_storagecache/__init__.py new file mode 100644 index 00000000000..d87eedc9417 --- /dev/null +++ b/src/storagecache/azext_storagecache/__init__.py @@ -0,0 +1,50 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
from azure.cli.core import AzCommandsLoader

from azext_storagecache.generated._help import helps  # pylint: disable=unused-import
try:
    from azext_storagecache.manual._help import helps  # pylint: disable=reimported
except ImportError:
    pass


class StorageCacheManagementClientCommandsLoader(AzCommandsLoader):
    """Command loader for the storagecache CLI extension.

    Wires the generated command table, argument definitions and help into
    the CLI. Each layer first loads the auto-generated modules and then,
    when a hand-written ``manual`` counterpart exists, lets it extend or
    override what was generated.
    """

    def __init__(self, cli_ctx=None):
        from azure.cli.core.commands import CliCommandType
        from azext_storagecache.generated._client_factory import cf_storagecache_cl
        # Custom commands resolve against azext_storagecache.custom and share
        # one management-client factory.
        custom_type = CliCommandType(
            operations_tmpl='azext_storagecache.custom#{}',
            client_factory=cf_storagecache_cl)
        super(StorageCacheManagementClientCommandsLoader, self).__init__(
            cli_ctx=cli_ctx, custom_command_type=custom_type)

    def load_command_table(self, args):
        # Generated commands first; manual commands may add to or override them.
        from azext_storagecache.generated.commands import load_command_table
        load_command_table(self, args)
        try:
            from azext_storagecache.manual.commands import load_command_table as load_command_table_manual
            load_command_table_manual(self, args)
        except ImportError:
            pass
        return self.command_table

    def load_arguments(self, command):
        # Same generated-then-manual layering as load_command_table.
        from azext_storagecache.generated._params import load_arguments
        load_arguments(self, command)
        try:
            from azext_storagecache.manual._params import load_arguments as load_arguments_manual
            load_arguments_manual(self, command)
        except ImportError:
            pass


COMMAND_LOADER_CLS = StorageCacheManagementClientCommandsLoader
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wildcard-import +# pylint: disable=unused-wildcard-import + +from .generated.action import * # noqa: F403 +try: + from .manual.action import * # noqa: F403 +except ImportError: + pass diff --git a/src/storagecache/azext_storagecache/azext_metadata.json b/src/storagecache/azext_storagecache/azext_metadata.json new file mode 100644 index 00000000000..4f48fa652a5 --- /dev/null +++ b/src/storagecache/azext_storagecache/azext_metadata.json @@ -0,0 +1,4 @@ +{ + "azext.isExperimental": true, + "azext.minCliCoreVersion": "2.11.0" +} \ No newline at end of file diff --git a/src/storagecache/azext_storagecache/custom.py b/src/storagecache/azext_storagecache/custom.py new file mode 100644 index 00000000000..dbe9d5f9742 --- /dev/null +++ b/src/storagecache/azext_storagecache/custom.py @@ -0,0 +1,17 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wildcard-import +# pylint: disable=unused-wildcard-import + +from .generated.custom import * # noqa: F403 +try: + from .manual.custom import * # noqa: F403 +except ImportError: + pass diff --git a/src/storagecache/azext_storagecache/generated/__init__.py b/src/storagecache/azext_storagecache/generated/__init__.py new file mode 100644 index 00000000000..c9cfdc73e77 --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/__init__.py @@ -0,0 +1,12 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/src/storagecache/azext_storagecache/generated/_client_factory.py b/src/storagecache/azext_storagecache/generated/_client_factory.py new file mode 100644 index 00000000000..c79266b21c7 --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/_client_factory.py @@ -0,0 +1,36 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
def cf_storagecache_cl(cli_ctx, *_):
    """Return a StorageCacheManagementClient bound to the current CLI context."""
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from ..vendored_sdks.storagecache import StorageCacheManagementClient
    return get_mgmt_service_client(cli_ctx, StorageCacheManagementClient)


def cf_sku(cli_ctx, *_):
    """Operations group for listing StorageCache.Cache SKUs."""
    return cf_storagecache_cl(cli_ctx).sku


def cf_usage_model(cli_ctx, *_):
    """Operations group for listing cache usage models."""
    return cf_storagecache_cl(cli_ctx).usage_model


def cf_ascoperation(cli_ctx, *_):
    """Operations group for asynchronous (ASC) operation status."""
    return cf_storagecache_cl(cli_ctx).asc_operation


def cf_cache(cli_ctx, *_):
    """Operations group for Cache resources."""
    return cf_storagecache_cl(cli_ctx).cache


def cf_storage_target(cli_ctx, *_):
    """Operations group for Storage Target resources."""
    return cf_storagecache_cl(cli_ctx).storage_target
+ examples: + - name: Skus_List + text: |- + az storagecache sku list +""" + +helps['storagecache usage-model'] = """ + type: group + short-summary: storagecache usage-model +""" + +helps['storagecache usage-model list'] = """ + type: command + short-summary: "Get the list of Cache Usage Models available to this subscription." + examples: + - name: UsageModels_List + text: |- + az storagecache usage-model list +""" + +helps['storagecache asc-operation'] = """ + type: group + short-summary: storagecache asc-operation +""" + +helps['storagecache asc-operation show'] = """ + type: command + short-summary: "Gets the status of an asynchronous operation for the Azure HPC cache." + examples: + - name: AscOperations_Get + text: |- + az storagecache asc-operation show --operation-id "testoperationid" --location "West US" +""" + +helps['storagecache cache'] = """ + type: group + short-summary: storagecache cache +""" + +helps['storagecache cache list'] = """ + type: command + short-summary: "Returns all Caches the user has access to under a resource group. And Returns all Caches the user \ +has access to under a subscription." + examples: + - name: Caches_ListByResourceGroup + text: |- + az storagecache cache list --resource-group "scgroup" + - name: Caches_List + text: |- + az storagecache cache list +""" + +helps['storagecache cache show'] = """ + type: command + short-summary: "Returns a Cache." + examples: + - name: Caches_Get + text: |- + az storagecache cache show --cache-name "sc1" --resource-group "scgroup" +""" + +helps['storagecache cache create'] = """ + type: command + short-summary: "Create a Cache." 
+ examples: + - name: Caches_CreateOrUpdate + text: |- + az storagecache cache create --location "westus" --cache-size-gb 3072 --subnet \ +"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks\ +/scvnet/subnets/sub1" --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" \ +--resource-group "scgroup" +""" + +helps['storagecache cache update'] = """ + type: command + short-summary: "Update a Cache instance." + examples: + - name: Caches_Update + text: |- + az storagecache cache update --location "westus" --cache-size-gb 3072 --subnet \ +"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks\ +/scvnet/subnets/sub1" --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" \ +--resource-group "scgroup" +""" + +helps['storagecache cache delete'] = """ + type: command + short-summary: "Schedules a Cache for deletion." + examples: + - name: Caches_Delete + text: |- + az storagecache cache delete --cache-name "sc" --resource-group "scgroup" +""" + +helps['storagecache cache flush'] = """ + type: command + short-summary: "Tells a Cache to write all dirty data to the Storage Target(s). During the flush, clients will see \ +errors returned until the flush is complete." + examples: + - name: Caches_Flush + text: |- + az storagecache cache flush --cache-name "sc" --resource-group "scgroup" +""" + +helps['storagecache cache start'] = """ + type: command + short-summary: "Tells a Stopped state Cache to transition to Active state." + examples: + - name: Caches_Start + text: |- + az storagecache cache start --cache-name "sc" --resource-group "scgroup" +""" + +helps['storagecache cache stop'] = """ + type: command + short-summary: "Tells an Active Cache to transition to Stopped state." 
+ examples: + - name: Caches_Stop + text: |- + az storagecache cache stop --cache-name "sc" --resource-group "scgroup" +""" + +helps['storagecache cache upgrade-firmware'] = """ + type: command + short-summary: "Upgrade a Cache's firmware if a new version is available. Otherwise, this operation has no \ +effect." + examples: + - name: Caches_UpgradeFirmware + text: |- + az storagecache cache upgrade-firmware --cache-name "sc1" --resource-group "scgroup" +""" + +helps['storagecache cache wait'] = """ + type: command + short-summary: Place the CLI in a waiting state until a condition of the storagecache cache is met. + examples: + - name: Pause executing next line of CLI script until the storagecache cache is successfully created. + text: |- + az storagecache cache wait --cache-name "sc1" --resource-group "scgroup" --created + - name: Pause executing next line of CLI script until the storagecache cache is successfully deleted. + text: |- + az storagecache cache wait --cache-name "sc1" --resource-group "scgroup" --deleted +""" + +helps['storagecache storage-target'] = """ + type: group + short-summary: storagecache storage-target +""" + +helps['storagecache storage-target list'] = """ + type: command + short-summary: "Returns a list of Storage Targets for the specified Cache." + examples: + - name: StorageTargets_List + text: |- + az storagecache storage-target list --cache-name "sc1" --resource-group "scgroup" +""" + +helps['storagecache storage-target show'] = """ + type: command + short-summary: "Returns a Storage Target from a Cache." + examples: + - name: StorageTargets_Get + text: |- + az storagecache storage-target show --cache-name "sc1" --resource-group "scgroup" --name "st1" +""" + +helps['storagecache storage-target create'] = """ + type: command + short-summary: "Create a Storage Target. 
This operation is allowed at any time, but if the Cache is down or \ +unhealthy, the actual creation/modification of the Storage Target may be delayed until the Cache is healthy again." + parameters: + - name: --junctions + short-summary: "List of Cache namespace junctions to target for namespace associations." + long-summary: | + Usage: --junctions namespace-path=XX target-path=XX nfs-export=XX + + namespace-path: Namespace path on a Cache for a Storage Target. + target-path: Path in Storage Target to which namespacePath points. + nfs-export: NFS export where targetPath exists. + + Multiple actions can be specified by using more than one --junctions argument. + - name: --nfs3 + short-summary: "Properties when targetType is nfs3." + long-summary: | + Usage: --nfs3 target=XX usage-model=XX + + target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44). + usage-model: Identifies the primary usage model to be used for this Storage Target. Get choices from \ +.../usageModels + examples: + - name: StorageTargets_CreateOrUpdate + text: |- + az storagecache storage-target create --cache-name "sc1" --resource-group "scgroup" --name "st1" \ +--junctions namespace-path="/path/on/cache" nfs-export="exp1" target-path="/path/on/exp1" --junctions \ +namespace-path="/path2/on/cache" nfs-export="exp2" target-path="/path2/on/exp2" --nfs3 target="10.0.44.44" \ +usage-model="READ_HEAVY_INFREQ" +""" + +helps['storagecache storage-target update'] = """ + type: command + short-summary: "Update a Storage Target. This operation is allowed at any time, but if the Cache is down or \ +unhealthy, the actual creation/modification of the Storage Target may be delayed until the Cache is healthy again." + parameters: + - name: --junctions + short-summary: "List of Cache namespace junctions to target for namespace associations." + long-summary: | + Usage: --junctions namespace-path=XX target-path=XX nfs-export=XX + + namespace-path: Namespace path on a Cache for a Storage Target. 
+ target-path: Path in Storage Target to which namespacePath points. + nfs-export: NFS export where targetPath exists. + + Multiple actions can be specified by using more than one --junctions argument. + - name: --nfs3 + short-summary: "Properties when targetType is nfs3." + long-summary: | + Usage: --nfs3 target=XX usage-model=XX + + target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44). + usage-model: Identifies the primary usage model to be used for this Storage Target. Get choices from \ +.../usageModels +""" + +helps['storagecache storage-target delete'] = """ + type: command + short-summary: "Removes a Storage Target from a Cache. This operation is allowed at any time, but if the Cache is \ +down or unhealthy, the actual removal of the Storage Target may be delayed until the Cache is healthy again. Note that \ +if the Cache has data to flush to the Storage Target, the data will be flushed before the Storage Target will be \ +deleted." + examples: + - name: StorageTargets_Delete + text: |- + az storagecache storage-target delete --cache-name "sc1" --resource-group "scgroup" --name "st1" +""" + +helps['storagecache storage-target wait'] = """ + type: command + short-summary: Place the CLI in a waiting state until a condition of the storagecache storage-target is met. + examples: + - name: Pause executing next line of CLI script until the storagecache storage-target is successfully created. + text: |- + az storagecache storage-target wait --cache-name "sc1" --resource-group "scgroup" --name "st1" \ +--created + - name: Pause executing next line of CLI script until the storagecache storage-target is successfully updated. + text: |- + az storagecache storage-target wait --cache-name "sc1" --resource-group "scgroup" --name "st1" \ +--updated + - name: Pause executing next line of CLI script until the storagecache storage-target is successfully deleted. 
def load_arguments(self, _):
    """Register CLI arguments for every storagecache command.

    Argument names and types mirror the generated SDK. Help strings shared
    by many commands are hoisted into locals so identical arguments cannot
    drift apart, and commands with identical argument sets are registered
    in loops instead of copy-pasted blocks.
    """
    # Shared help text, kept verbatim from the service specification.
    cache_name_help = ('Name of Cache. Length of name must be not greater than 80 and chars must be in list of '
                       '[-0-9a-zA-Z_] char class.')
    storage_target_name_help = ('Name of the Storage Target. Length of name must be not greater than 80 and chars '
                                'must be in list of [-0-9a-zA-Z_] char class.')
    storage_target_name_options = ['--name', '-n', '--storage-target-name']
    # NOTE: stray '' literals inside this enum list in the generated code
    # (line-wrap artifacts) have been dropped; the values are unchanged.
    provisioning_state_type = get_enum_type(['Succeeded', 'Failed', 'Cancelled', 'Creating', 'Deleting', 'Updating'])
    provisioning_state_help = ('ARM provisioning state, see '
                               'https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md'
                               '#provisioningstate-property')

    with self.argument_context('storagecache asc-operation show') as c:
        c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
        c.argument('operation_id', type=str, id_part='child_name_1',
                   help='The operation id which uniquely identifies the asynchronous operation.')

    with self.argument_context('storagecache cache list') as c:
        c.argument('resource_group_name', resource_group_name_type)

    # Commands that only address an existing cache share the same two arguments.
    for scope in ['storagecache cache show', 'storagecache cache delete', 'storagecache cache flush',
                  'storagecache cache start', 'storagecache cache stop', 'storagecache cache upgrade-firmware',
                  'storagecache cache wait', 'storagecache storage-target list']:
        with self.argument_context(scope) as c:
            c.argument('resource_group_name', resource_group_name_type)
            c.argument('cache_name', type=str, help=cache_name_help)

    # create and update accept the full cache payload; the sets are identical.
    for scope in ['storagecache cache create', 'storagecache cache update']:
        with self.argument_context(scope) as c:
            c.argument('resource_group_name', resource_group_name_type)
            c.argument('cache_name', type=str, help=cache_name_help)
            c.argument('tags', tags_type)
            c.argument('location', arg_type=get_location_type(self.cli_ctx),
                       validator=get_default_location_from_resource_group)
            c.argument('cache_size_gb', type=int, help='The size of this Cache, in GB.')
            c.argument('provisioning_state', arg_type=provisioning_state_type, help=provisioning_state_help)
            c.argument('subnet', type=str, help='Subnet used for the Cache.')
            c.argument('security_settings_root_squash', arg_type=get_three_state_flag(),
                       help='root squash of cache property.')
            c.argument('encryption_settings_key_encryption_key', type=validate_file_or_dict,
                       help='Specifies the location of the key encryption key in Key Vault. Expected value: '
                            'json-string/@json-file.')
            c.argument('network_settings_mtu', type=int,
                       help='The IPv4 maximum transmission unit configured for the subnet.')
            c.argument('sku_name', type=str, help='SKU name for this Cache.')
            c.argument('identity_type', arg_type=get_enum_type(['SystemAssigned', 'None']),
                       help='The type of identity used for the cache')

    for scope in ['storagecache storage-target show', 'storagecache storage-target wait']:
        with self.argument_context(scope) as c:
            c.argument('resource_group_name', resource_group_name_type)
            c.argument('cache_name', type=str, help=cache_name_help)
            c.argument('storage_target_name', options_list=storage_target_name_options, type=str,
                       help=storage_target_name_help)

    # create and update accept the full storage-target payload; the sets are identical.
    for scope in ['storagecache storage-target create', 'storagecache storage-target update']:
        with self.argument_context(scope) as c:
            c.argument('resource_group_name', resource_group_name_type)
            c.argument('cache_name', type=str, help=cache_name_help)
            c.argument('storage_target_name', options_list=storage_target_name_options, type=str,
                       help=storage_target_name_help)
            c.argument('junctions', action=AddJunctions, nargs='*',
                       help='List of Cache namespace junctions to target for namespace associations.')
            c.argument('provisioning_state', arg_type=provisioning_state_type, help=provisioning_state_help)
            c.argument('nfs3', action=AddNfs3, nargs='*', help='Properties when targetType is nfs3.')
            c.argument('unknown_unknown_map', action=AddUnknownUnknownMap, nargs='*',
                       help='Dictionary of string->string pairs containing information about the Storage Target. '
                            'Expect value: KEY1=VALUE1 KEY2=VALUE2 ...')
            c.argument('clfs_target', type=str, help='Resource ID of storage container.')

    with self.argument_context('storagecache storage-target delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('cache_name', type=str, help=cache_name_help)
        # NOTE(review): generated help here was the terser 'Name of Storage
        # Target.'; unified with the sibling storage-target commands.
        c.argument('storage_target_name', options_list=storage_target_name_options, type=str,
                   help=storage_target_name_help)
+# -------------------------------------------------------------------------- +# pylint: disable=protected-access + +import argparse +from collections import defaultdict +from knack.util import CLIError + + +class AddJunctions(argparse._AppendAction): + def __call__(self, parser, namespace, values, option_string=None): + action = self.get_action(values, option_string) + super(AddJunctions, self).__call__(parser, namespace, action, option_string) + + def get_action(self, values, option_string): # pylint: disable=no-self-use + try: + properties = defaultdict(list) + for (k, v) in (x.split('=', 1) for x in values): + properties[k].append(v) + properties = dict(properties) + except ValueError: + raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string)) + d = {} + for k in properties: + kl = k.lower() + v = properties[k] + if kl == 'namespace-path': + d['namespace_path'] = v[0] + elif kl == 'target-path': + d['target_path'] = v[0] + elif kl == 'nfs-export': + d['nfs_export'] = v[0] + return d + + +class AddNfs3(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + action = self.get_action(values, option_string) + namespace.nfs3 = action + + def get_action(self, values, option_string): # pylint: disable=no-self-use + try: + properties = defaultdict(list) + for (k, v) in (x.split('=', 1) for x in values): + properties[k].append(v) + properties = dict(properties) + except ValueError: + raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string)) + d = {} + for k in properties: + kl = k.lower() + v = properties[k] + if kl == 'target': + d['target'] = v[0] + elif kl == 'usage-model': + d['usage_model'] = v[0] + return d + + +class AddUnknownUnknownMap(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + action = self.get_action(values, option_string) + namespace.unknown_unknown_map = action + + def get_action(self, values, option_string): # pylint: disable=no-self-use + try: + 
properties = defaultdict(list) + for (k, v) in (x.split('=', 1) for x in values): + properties[k].append(v) + properties = dict(properties) + except ValueError: + raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string)) + d = {} + for k in properties: + v = properties[k] + d[k] = v[0] + return d diff --git a/src/storagecache/azext_storagecache/generated/commands.py b/src/storagecache/azext_storagecache/generated/commands.py new file mode 100644 index 00000000000..f15bd7b50e0 --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/commands.py @@ -0,0 +1,72 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-statements +# pylint: disable=too-many-locals + +from azure.cli.core.commands import CliCommandType + + +def load_command_table(self, _): + + from azext_storagecache.generated._client_factory import cf_sku + storagecache_sku = CliCommandType( + operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._sku_operations#SkuOperations.{}', + client_factory=cf_sku) + with self.command_group('storagecache sku', storagecache_sku, client_factory=cf_sku, is_experimental=True) as g: + g.custom_command('list', 'storagecache_sku_list') + + from azext_storagecache.generated._client_factory import cf_usage_model + storagecache_usage_model = CliCommandType( + operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._usage_model_operations#UsageModelOpe' + 'rations.{}', + client_factory=cf_usage_model) + with self.command_group('storagecache usage-model', storagecache_usage_model, 
client_factory=cf_usage_model, + is_experimental=True) as g: + g.custom_command('list', 'storagecache_usage_model_list') + + from azext_storagecache.generated._client_factory import cf_ascoperation + storagecache_ascoperation = CliCommandType( + operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._asc_operation_operations#ASCOperatio' + 'nOperations.{}', + client_factory=cf_ascoperation) + with self.command_group('storagecache asc-operation', storagecache_ascoperation, client_factory=cf_ascoperation, + is_experimental=True) as g: + g.custom_show_command('show', 'storagecache_asc_operation_show') + + from azext_storagecache.generated._client_factory import cf_cache + storagecache_cache = CliCommandType( + operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._cache_operations#CacheOperations.{}', + client_factory=cf_cache) + with self.command_group('storagecache cache', storagecache_cache, client_factory=cf_cache, + is_experimental=True) as g: + g.custom_command('list', 'storagecache_cache_list') + g.custom_show_command('show', 'storagecache_cache_show') + g.custom_command('create', 'storagecache_cache_create', supports_no_wait=True) + g.custom_command('update', 'storagecache_cache_update') + g.custom_command('delete', 'storagecache_cache_delete', supports_no_wait=True, confirmation=True) + g.custom_command('flush', 'storagecache_cache_flush', supports_no_wait=True) + g.custom_command('start', 'storagecache_cache_start', supports_no_wait=True) + g.custom_command('stop', 'storagecache_cache_stop', supports_no_wait=True) + g.custom_command('upgrade-firmware', 'storagecache_cache_upgrade_firmware', supports_no_wait=True) + g.custom_wait_command('wait', 'storagecache_cache_show') + + from azext_storagecache.generated._client_factory import cf_storage_target + storagecache_storage_target = CliCommandType( + operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._storage_target_operations#StorageTar' + 
'getOperations.{}', + client_factory=cf_storage_target) + with self.command_group('storagecache storage-target', storagecache_storage_target, + client_factory=cf_storage_target, is_experimental=True) as g: + g.custom_command('list', 'storagecache_storage_target_list') + g.custom_show_command('show', 'storagecache_storage_target_show') + g.custom_command('create', 'storagecache_storage_target_create', supports_no_wait=True) + g.custom_command('update', 'storagecache_storage_target_update', supports_no_wait=True) + g.custom_command('delete', 'storagecache_storage_target_delete', supports_no_wait=True, confirmation=True) + g.custom_wait_command('wait', 'storagecache_storage_target_show') diff --git a/src/storagecache/azext_storagecache/generated/custom.py b/src/storagecache/azext_storagecache/generated/custom.py new file mode 100644 index 00000000000..8614d4a59a8 --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/custom.py @@ -0,0 +1,228 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines + +from azure.cli.core.util import sdk_no_wait + + +def storagecache_sku_list(client): + return client.list() + + +def storagecache_usage_model_list(client): + return client.list() + + +def storagecache_asc_operation_show(client, + location, + operation_id): + return client.get(location=location, + operation_id=operation_id) + + +def storagecache_cache_list(client, + resource_group_name=None): + if resource_group_name: + return client.list_by_resource_group(resource_group_name=resource_group_name) + return client.list() + + +def storagecache_cache_show(client, + resource_group_name, + cache_name): + return client.get(resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_cache_create(client, + resource_group_name, + cache_name, + tags=None, + location=None, + cache_size_gb=None, + provisioning_state=None, + subnet=None, + security_settings_root_squash=None, + encryption_settings_key_encryption_key=None, + network_settings_mtu=None, + sku_name=None, + identity_type=None, + no_wait=False): + if network_settings_mtu is None: + network_settings_mtu = 1500 + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + cache_name=cache_name, + tags=tags, + location=location, + cache_size_gb=cache_size_gb, + provisioning_state=provisioning_state, + subnet=subnet, + upgrade_status=None, + root_squash=security_settings_root_squash, + key_encryption_key=encryption_settings_key_encryption_key, + mtu=network_settings_mtu, + name=sku_name, + type=identity_type) + + +def storagecache_cache_update(client, + resource_group_name, + cache_name, + tags=None, + location=None, + cache_size_gb=None, + provisioning_state=None, + subnet=None, + security_settings_root_squash=None, + encryption_settings_key_encryption_key=None, + network_settings_mtu=None, + sku_name=None, + identity_type=None): + if 
network_settings_mtu is None: + network_settings_mtu = 1500 + return client.update(resource_group_name=resource_group_name, + cache_name=cache_name, + tags=tags, + location=location, + cache_size_gb=cache_size_gb, + provisioning_state=provisioning_state, + subnet=subnet, + upgrade_status=None, + root_squash=security_settings_root_squash, + key_encryption_key=encryption_settings_key_encryption_key, + mtu=network_settings_mtu, + name=sku_name, + type=identity_type) + + +def storagecache_cache_delete(client, + resource_group_name, + cache_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_delete, + resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_cache_flush(client, + resource_group_name, + cache_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_flush, + resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_cache_start(client, + resource_group_name, + cache_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_start, + resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_cache_stop(client, + resource_group_name, + cache_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_stop, + resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_cache_upgrade_firmware(client, + resource_group_name, + cache_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_upgrade_firmware, + resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_storage_target_list(client, + resource_group_name, + cache_name): + return client.list_by_cache(resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_storage_target_show(client, + resource_group_name, + cache_name, + storage_target_name): + return client.get(resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name) 
+ + +def storagecache_storage_target_create(client, + resource_group_name, + cache_name, + storage_target_name, + junctions=None, + provisioning_state=None, + nfs3=None, + unknown_unknown_map=None, + clfs_target=None, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + junctions=junctions, + target_type=target_type, + provisioning_state=provisioning_state, + nfs3=nfs3, + unknown_map=unknown_unknown_map, + target=clfs_target) + + +def storagecache_storage_target_update(client, + resource_group_name, + cache_name, + storage_target_name, + junctions=None, + provisioning_state=None, + nfs3=None, + unknown_unknown_map=None, + clfs_target=None, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + junctions=junctions, + target_type=target_type, + provisioning_state=provisioning_state, + nfs3=nfs3, + unknown_map=unknown_unknown_map, + target=clfs_target) + + +def storagecache_storage_target_delete(client, + resource_group_name, + cache_name, + storage_target_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_delete, + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name) diff --git a/src/storagecache/azext_storagecache/manual/__init__.py b/src/storagecache/azext_storagecache/manual/__init__.py new file mode 100644 index 00000000000..c9cfdc73e77 --- /dev/null +++ b/src/storagecache/azext_storagecache/manual/__init__.py @@ -0,0 +1,12 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/src/storagecache/azext_storagecache/tests/__init__.py b/src/storagecache/azext_storagecache/tests/__init__.py new file mode 100644 index 00000000000..50e0627daff --- /dev/null +++ b/src/storagecache/azext_storagecache/tests/__init__.py @@ -0,0 +1,114 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +import inspect +import logging +import os +import sys +import traceback +import datetime as dt + +from azure.core.exceptions import AzureError +from azure.cli.testsdk.exceptions import CliTestError, CliExecutionError, JMESPathCheckAssertionError + + +logger = logging.getLogger('azure.cli.testsdk') +logger.addHandler(logging.StreamHandler()) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) +exceptions = [] +test_map = dict() +SUCCESSED = "successed" +FAILED = "failed" + + +def try_manual(func): + def import_manual_function(origin_func): + from importlib import import_module + decorated_path = inspect.getfile(origin_func) + module_path = __path__[0] + if not decorated_path.startswith(module_path): + raise Exception("Decorator can only be used in submodules!") + manual_path = os.path.join( + decorated_path[module_path.rfind(os.path.sep) + 1:]) + manual_file_path, manual_file_name = os.path.split(manual_path) + module_name, 
_ = os.path.splitext(manual_file_name) + manual_module = "..manual." + \ + ".".join(manual_file_path.split(os.path.sep) + [module_name, ]) + return getattr(import_module(manual_module, package=__name__), origin_func.__name__) + + def get_func_to_call(): + func_to_call = func + try: + func_to_call = import_manual_function(func) + func_to_call = import_manual_function(func) + logger.info("Found manual override for %s(...)", func.__name__) + except (ImportError, AttributeError): + pass + return func_to_call + + def wrapper(*args, **kwargs): + func_to_call = get_func_to_call() + logger.info("running %s()...", func.__name__) + try: + test_map[func.__name__] = dict() + test_map[func.__name__]["result"] = SUCCESSED + test_map[func.__name__]["error_message"] = "" + test_map[func.__name__]["error_stack"] = "" + test_map[func.__name__]["error_normalized"] = "" + test_map[func.__name__]["start_dt"] = dt.datetime.utcnow() + ret = func_to_call(*args, **kwargs) + except (AssertionError, AzureError, CliTestError, CliExecutionError, SystemExit, + JMESPathCheckAssertionError) as e: + test_map[func.__name__]["end_dt"] = dt.datetime.utcnow() + test_map[func.__name__]["result"] = FAILED + test_map[func.__name__]["error_message"] = str(e).replace("\r\n", " ").replace("\n", " ")[:500] + test_map[func.__name__]["error_stack"] = traceback.format_exc().replace( + "\r\n", " ").replace("\n", " ")[:500] + logger.info("--------------------------------------") + logger.info("step exception: %s", e) + logger.error("--------------------------------------") + logger.error("step exception in %s: %s", func.__name__, e) + logger.info(traceback.format_exc()) + exceptions.append((func.__name__, sys.exc_info())) + else: + test_map[func.__name__]["end_dt"] = dt.datetime.utcnow() + return ret + + if inspect.isclass(func): + return get_func_to_call() + return wrapper + + +def calc_coverage(filename): + filename = filename.split(".")[0] + coverage_name = filename + "_coverage.md" + with open(coverage_name, 
"w") as f: + f.write("|Scenario|Result|ErrorMessage|ErrorStack|ErrorNormalized|StartDt|EndDt|\n") + total = len(test_map) + covered = 0 + for k, v in test_map.items(): + if not k.startswith("step_"): + total -= 1 + continue + if v["result"] == SUCCESSED: + covered += 1 + f.write("|{step_name}|{result}|{error_message}|{error_stack}|{error_normalized}|{start_dt}|" + "{end_dt}|\n".format(step_name=k, **v)) + f.write("Coverage: {}/{}\n".format(covered, total)) + print("Create coverage\n", file=sys.stderr) + + +def raise_if(): + if exceptions: + if len(exceptions) <= 1: + raise exceptions[0][1][1] + message = "{}\nFollowed with exceptions in other steps:\n".format(str(exceptions[0][1][1])) + message += "\n".join(["{}: {}".format(h[0], h[1][1]) for h in exceptions[1:]]) + raise exceptions[0][1][0](message).with_traceback(exceptions[0][1][2]) diff --git a/src/storagecache/azext_storagecache/tests/latest/__init__.py b/src/storagecache/azext_storagecache/tests/latest/__init__.py new file mode 100644 index 00000000000..c9cfdc73e77 --- /dev/null +++ b/src/storagecache/azext_storagecache/tests/latest/__init__.py @@ -0,0 +1,12 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/src/storagecache/azext_storagecache/tests/latest/preparers.py b/src/storagecache/azext_storagecache/tests/latest/preparers.py new file mode 100644 index 00000000000..0879e51945a --- /dev/null +++ b/src/storagecache/azext_storagecache/tests/latest/preparers.py @@ -0,0 +1,159 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import os +from datetime import datetime +from azure_devtools.scenario_tests import SingleValueReplacer +from azure.cli.testsdk.preparers import NoTrafficRecordingPreparer +from azure.cli.testsdk.exceptions import CliTestError +from azure.cli.testsdk.reverse_dependency import get_dummy_cli + + +KEY_RESOURCE_GROUP = 'rg' +KEY_VIRTUAL_NETWORK = 'vnet' +KEY_VNET_SUBNET = 'subnet' +KEY_VNET_NIC = 'nic' + + +class VirtualNetworkPreparer(NoTrafficRecordingPreparer, SingleValueReplacer): + def __init__(self, name_prefix='clitest.vn', + parameter_name='virtual_network', + resource_group_name=None, + resource_group_key=KEY_RESOURCE_GROUP, + dev_setting_name='AZURE_CLI_TEST_DEV_VIRTUAL_NETWORK_NAME', + random_name_length=24, key=KEY_VIRTUAL_NETWORK): + if ' ' in name_prefix: + raise CliTestError( + 'Error: Space character in name prefix \'%s\'' % name_prefix) + super(VirtualNetworkPreparer, self).__init__( + name_prefix, random_name_length) + self.cli_ctx = get_dummy_cli() + self.parameter_name = parameter_name + self.key = key + self.resource_group_name = resource_group_name + 
self.resource_group_key = resource_group_key + self.dev_setting_name = os.environ.get(dev_setting_name, None) + + def create_resource(self, name, **_): + if self.dev_setting_name: + return {self.parameter_name: self.dev_setting_name, } + + if not self.resource_group_name: + self.resource_group_name = self.test_class_instance.kwargs.get( + self.resource_group_key) + if not self.resource_group_name: + raise CliTestError("Error: No resource group configured!") + + tags = {'product': 'azurecli', 'cause': 'automation', + 'date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')} + if 'ENV_JOB_NAME' in os.environ: + tags['job'] = os.environ['ENV_JOB_NAME'] + tags = ' '.join(['{}={}'.format(key, value) + for key, value in tags.items()]) + template = 'az network vnet create --resource-group {} --name {} --subnet-name default --tag ' + tags + self.live_only_execute(self.cli_ctx, template.format( + self.resource_group_name, name)) + + self.test_class_instance.kwargs[self.key] = name + return {self.parameter_name: name} + + def remove_resource(self, name, **_): + # delete vnet if test is being recorded and if the vnet is not a dev rg + if not self.dev_setting_name: + self.live_only_execute( + self.cli_ctx, + 'az network vnet delete --name {} --resource-group {}'.format(name, self.resource_group_name)) + + +class VnetSubnetPreparer(NoTrafficRecordingPreparer, SingleValueReplacer): + def __init__(self, name_prefix='clitest.vn', + parameter_name='subnet', + resource_group_key=KEY_RESOURCE_GROUP, + vnet_key=KEY_VIRTUAL_NETWORK, + address_prefixes="11.0.0.0/24", + dev_setting_name='AZURE_CLI_TEST_DEV_VNET_SUBNET_NAME', + key=KEY_VNET_SUBNET): + if ' ' in name_prefix: + raise CliTestError( + 'Error: Space character in name prefix \'%s\'' % name_prefix) + super(VnetSubnetPreparer, self).__init__(name_prefix, 15) + self.cli_ctx = get_dummy_cli() + self.parameter_name = parameter_name + self.key = key + self.resource_group = [resource_group_key, None] + self.vnet = [vnet_key, None] + 
self.address_prefixes = address_prefixes + self.dev_setting_name = os.environ.get(dev_setting_name, None) + + def create_resource(self, name, **_): + if self.dev_setting_name: + return {self.parameter_name: self.dev_setting_name, } + + if not self.resource_group[1]: + self.resource_group[1] = self.test_class_instance.kwargs.get( + self.resource_group[0]) + if not self.resource_group[1]: + raise CliTestError("Error: No resource group configured!") + if not self.vnet[1]: + self.vnet[1] = self.test_class_instance.kwargs.get(self.vnet[0]) + if not self.vnet[1]: + raise CliTestError("Error: No vnet configured!") + + self.test_class_instance.kwargs[self.key] = 'default' + return {self.parameter_name: name} + + def remove_resource(self, name, **_): + pass + + +class VnetNicPreparer(NoTrafficRecordingPreparer, SingleValueReplacer): + def __init__(self, name_prefix='clitest.nic', + parameter_name='subnet', + resource_group_key=KEY_RESOURCE_GROUP, + vnet_key=KEY_VIRTUAL_NETWORK, + dev_setting_name='AZURE_CLI_TEST_DEV_VNET_NIC_NAME', + key=KEY_VNET_NIC): + if ' ' in name_prefix: + raise CliTestError( + 'Error: Space character in name prefix \'%s\'' % name_prefix) + super(VnetNicPreparer, self).__init__(name_prefix, 15) + self.cli_ctx = get_dummy_cli() + self.parameter_name = parameter_name + self.key = key + self.resource_group = [resource_group_key, None] + self.vnet = [vnet_key, None] + self.dev_setting_name = os.environ.get(dev_setting_name, None) + + def create_resource(self, name, **_): + if self.dev_setting_name: + return {self.parameter_name: self.dev_setting_name, } + + if not self.resource_group[1]: + self.resource_group[1] = self.test_class_instance.kwargs.get( + self.resource_group[0]) + if not self.resource_group[1]: + raise CliTestError("Error: No resource group configured!") + if not self.vnet[1]: + self.vnet[1] = self.test_class_instance.kwargs.get(self.vnet[0]) + if not self.vnet[1]: + raise CliTestError("Error: No vnet configured!") + + template = 'az network 
nic create --resource-group {} --name {} --vnet-name {} --subnet default ' + self.live_only_execute(self.cli_ctx, template.format( + self.resource_group[1], name, self.vnet[1])) + + self.test_class_instance.kwargs[self.key] = name + return {self.parameter_name: name} + + def remove_resource(self, name, **_): + if not self.dev_setting_name: + self.live_only_execute( + self.cli_ctx, + 'az network nic delete --name {} --resource-group {}'.format(name, self.resource_group[1])) diff --git a/src/storagecache/azext_storagecache/tests/latest/test_storagecache_scenario.py b/src/storagecache/azext_storagecache/tests/latest/test_storagecache_scenario.py new file mode 100644 index 00000000000..7c0b6bcf41c --- /dev/null +++ b/src/storagecache/azext_storagecache/tests/latest/test_storagecache_scenario.py @@ -0,0 +1,251 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import os +from azure.cli.testsdk import ScenarioTest +from .. 
import try_manual, raise_if, calc_coverage +from azure.cli.testsdk import ResourceGroupPreparer +from .preparers import VirtualNetworkPreparer + + +TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..')) + + +# Env setup +@try_manual +def setup(test, rg): + pass + + +# EXAMPLE: /AscOperations/get/AscOperations_Get +@try_manual +def step__ascoperations_get_ascoperations_get(test, rg): + test.cmd('az storagecache asc-operation show ' + '--operation-id "testoperationid" ' + '--location "West US"', + checks=[]) + + +# EXAMPLE: /Caches/put/Caches_CreateOrUpdate +@try_manual +def step__caches_put_caches_createorupdate(test, rg): + test.cmd('az storagecache cache create ' + '--location "westus" ' + '--cache-size-gb 3072 ' + '--subnet "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetwork' + 's/{vn}/subnets/default" ' + '--sku-name "Standard_2G" ' + '--tags "{{\\"Dept\\":\\"ContosoAds\\"}}" ' + '--cache-name "sc1" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/get/Caches_Get +@try_manual +def step__caches_get_caches_get(test, rg): + test.cmd('az storagecache cache show ' + '--cache-name "sc1" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/get/Caches_List +@try_manual +def step__caches_get_caches_list(test, rg): + test.cmd('az storagecache cache list ' + '-g ""', + checks=[]) + + +# EXAMPLE: /Caches/get/Caches_ListByResourceGroup +@try_manual +def step__caches_get_caches_listbyresourcegroup(test, rg): + test.cmd('az storagecache cache list ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/patch/Caches_Update +@try_manual +def step__caches_patch_caches_update(test, rg): + test.cmd('az storagecache cache update ' + '--location "westus" ' + '--cache-size-gb 3072 ' + '--subnet "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetwork' + 's/{vn}/subnets/default" ' + '--sku-name "Standard_2G" ' + '--tags 
"{{\\"Dept\\":\\"ContosoAds\\"}}" ' + '--cache-name "sc1" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/post/Caches_Flush +@try_manual +def step__caches_post_caches_flush(test, rg): + test.cmd('az storagecache cache flush ' + '--cache-name "sc" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/post/Caches_Start +@try_manual +def step__caches_post_caches_start(test, rg): + test.cmd('az storagecache cache start ' + '--cache-name "sc" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/post/Caches_Stop +@try_manual +def step__caches_post_caches_stop(test, rg): + test.cmd('az storagecache cache stop ' + '--cache-name "sc" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/post/Caches_UpgradeFirmware +@try_manual +def step__caches_post_caches_upgradefirmware(test, rg): + test.cmd('az storagecache cache upgrade-firmware ' + '--cache-name "sc1" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/delete/Caches_Delete +@try_manual +def step__caches_delete_caches_delete(test, rg): + test.cmd('az storagecache cache delete -y ' + '--cache-name "sc" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Skus/get/Skus_List +@try_manual +def step__skus_get_skus_list(test, rg): + test.cmd('az storagecache sku list', + checks=[]) + + +# EXAMPLE: /StorageTargets/put/StorageTargets_CreateOrUpdate +@try_manual +def step__storagetargets_put(test, rg): + test.cmd('az storagecache storage-target create ' + '--cache-name "sc1" ' + '--resource-group "{rg}" ' + '--name "{myStorageTarget}" ' + '--junctions namespace-path="/path/on/cache" nfs-export="exp1" target-path="/path/on/exp1" ' + '--junctions namespace-path="/path2/on/cache" nfs-export="exp2" target-path="/path2/on/exp2" ' + '--nfs3 target="10.0.44.44" usage-model="READ_HEAVY_INFREQ"', + checks=[ + test.check("name", "{myStorageTarget}", case_sensitive=False), + test.check("nfs3.target", "10.0.44.44", case_sensitive=False), + 
test.check("nfs3.usageModel", "READ_HEAVY_INFREQ", case_sensitive=False), + ]) + test.cmd('az storagecache storage-target wait --created ' + '--resource-group "{rg}" ' + '--name "{myStorageTarget}"', + checks=[]) + + +# EXAMPLE: /StorageTargets/get/StorageTargets_Get +@try_manual +def step__storagetargets_get_storagetargets_get(test, rg): + test.cmd('az storagecache storage-target show ' + '--cache-name "sc1" ' + '--resource-group "{rg}" ' + '--name "{myStorageTarget}"', + checks=[ + test.check("name", "{myStorageTarget}", case_sensitive=False), + ]) + + +# EXAMPLE: /StorageTargets/get/StorageTargets_List +@try_manual +def step__storagetargets_get_storagetargets_list(test, rg): + test.cmd('az storagecache storage-target list ' + '--cache-name "sc1" ' + '--resource-group "{rg}"', + checks=[ + test.check('length(@)', 1), + ]) + + +# EXAMPLE: /StorageTargets/delete/StorageTargets_Delete +@try_manual +def step__storagetargets_delete_storagetargets_delete(test, rg): + test.cmd('az storagecache storage-target delete -y ' + '--cache-name "sc1" ' + '--resource-group "{rg}" ' + '--name "{myStorageTarget}"', + checks=[]) + + +# EXAMPLE: /UsageModels/get/UsageModels_List +@try_manual +def step__usagemodels_get_usagemodels_list(test, rg): + test.cmd('az storagecache usage-model list', + checks=[]) + + +# Env cleanup +@try_manual +def cleanup(test, rg): + pass + + +# Testcase +@try_manual +def call_scenario(test, rg): + setup(test, rg) + step__ascoperations_get_ascoperations_get(test, rg) + step__caches_put_caches_createorupdate(test, rg) + step__caches_get_caches_get(test, rg) + step__caches_get_caches_list(test, rg) + step__caches_get_caches_listbyresourcegroup(test, rg) + step__caches_patch_caches_update(test, rg) + step__caches_post_caches_flush(test, rg) + step__caches_post_caches_start(test, rg) + step__caches_post_caches_stop(test, rg) + step__caches_post_caches_upgradefirmware(test, rg) + step__caches_delete_caches_delete(test, rg) + step__skus_get_skus_list(test, rg) + 
step__storagetargets_put(test, rg) + step__storagetargets_get_storagetargets_get(test, rg) + step__storagetargets_get_storagetargets_list(test, rg) + step__storagetargets_delete_storagetargets_delete(test, rg) + step__usagemodels_get_usagemodels_list(test, rg) + cleanup(test, rg) + + +@try_manual +class StorageCacheManagementClientScenarioTest(ScenarioTest): + + @ResourceGroupPreparer(name_prefix='cliteststoragecache_scgroup'[:7], key='rg', parameter_name='rg') + @VirtualNetworkPreparer(name_prefix='cliteststoragecache_scvnet'[:7], key='vn', resource_group_key='rg') + def test_storagecache(self, rg): + + self.kwargs.update({ + 'subscription_id': self.get_subscription_id() + }) + + self.kwargs.update({ + 'myStorageTarget': 'st1', + }) + + call_scenario(self, rg) + calc_coverage(__file__) + raise_if() diff --git a/src/storagecache/azext_storagecache/vendored_sdks/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/__init__.py new file mode 100644 index 00000000000..c9cfdc73e77 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/__init__.py @@ -0,0 +1,12 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/__init__.py new file mode 100644 index 00000000000..1d1e4ecf4e2 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/__init__.py @@ -0,0 +1,16 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._storage_cache_management_client import StorageCacheManagementClient +__all__ = ['StorageCacheManagementClient'] + +try: + from ._patch import patch_sdk # type: ignore + patch_sdk() +except ImportError: + pass diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_configuration.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_configuration.py new file mode 100644 index 00000000000..f426ad8ef15 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_configuration.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMHttpLoggingPolicy + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + + from azure.core.credentials import TokenCredential + +VERSION = "unknown" + +class StorageCacheManagementClientConfiguration(Configuration): + """Configuration for StorageCacheManagementClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. + :type subscription_id: str + """ + + def __init__( + self, + credential, # type: "TokenCredential" + subscription_id, # type: str + **kwargs # type: Any + ): + # type: (...) -> None + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + super(StorageCacheManagementClientConfiguration, self).__init__(**kwargs) + + self.credential = credential + self.subscription_id = subscription_id + self.api_version = "2020-03-01" + self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) + kwargs.setdefault('sdk_moniker', 'storagecachemanagementclient/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs # type: Any + ): + # type: (...) 
-> None + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_storage_cache_management_client.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_storage_cache_management_client.py new file mode 100644 index 00000000000..f0f1db4bf8d --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_storage_cache_management_client.py @@ -0,0 +1,94 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.mgmt.core import ARMPipelineClient +from msrest import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Optional + + from azure.core.credentials import TokenCredential + +from ._configuration import StorageCacheManagementClientConfiguration +from .operations import OperationOperations +from .operations import SkuOperations +from .operations import UsageModelOperations +from .operations import ASCOperationOperations +from .operations import CacheOperations +from .operations import StorageTargetOperations +from . import models + + +class StorageCacheManagementClient(object): + """A Storage Cache provides scalable caching service for NAS clients, serving data from either NFSv3 or Blob at-rest storage (referred to as "Storage Targets"). These operations allow you to manage Caches. + + :ivar operation: OperationOperations operations + :vartype operation: storage_cache_management_client.operations.OperationOperations + :ivar sku: SkuOperations operations + :vartype sku: storage_cache_management_client.operations.SkuOperations + :ivar usage_model: UsageModelOperations operations + :vartype usage_model: storage_cache_management_client.operations.UsageModelOperations + :ivar asc_operation: ASCOperationOperations operations + :vartype asc_operation: storage_cache_management_client.operations.ASCOperationOperations + :ivar cache: CacheOperations operations + :vartype cache: storage_cache_management_client.operations.CacheOperations + :ivar storage_target: StorageTargetOperations operations + :vartype storage_target: storage_cache_management_client.operations.StorageTargetOperations + :param credential: Credential needed for the client to connect to Azure. 
+ :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. + :type subscription_id: str + :param str base_url: Service URL + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + """ + + def __init__( + self, + credential, # type: "TokenCredential" + subscription_id, # type: str + base_url=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + if not base_url: + base_url = 'https://management.azure.com' + self._config = StorageCacheManagementClientConfiguration(credential, subscription_id, **kwargs) + self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.operation = OperationOperations( + self._client, self._config, self._serialize, self._deserialize) + self.sku = SkuOperations( + self._client, self._config, self._serialize, self._deserialize) + self.usage_model = UsageModelOperations( + self._client, self._config, self._serialize, self._deserialize) + self.asc_operation = ASCOperationOperations( + self._client, self._config, self._serialize, self._deserialize) + self.cache = CacheOperations( + self._client, self._config, self._serialize, self._deserialize) + self.storage_target = StorageTargetOperations( + self._client, self._config, self._serialize, self._deserialize) + + def close(self): + # type: () -> None + self._client.close() + + def __enter__(self): + # type: () -> StorageCacheManagementClient + self._client.__enter__() + return self + + def __exit__(self, *exc_details): + # type: (Any) -> None + self._client.__exit__(*exc_details) diff --git 
a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/__init__.py new file mode 100644 index 00000000000..b1121365385 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._storage_cache_management_client import StorageCacheManagementClient +__all__ = ['StorageCacheManagementClient'] diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_configuration.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_configuration.py new file mode 100644 index 00000000000..9f275cad8ca --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_configuration.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMHttpLoggingPolicy + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + +VERSION = "unknown" + +class StorageCacheManagementClientConfiguration(Configuration): + """Configuration for StorageCacheManagementClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. + :type subscription_id: str + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + subscription_id: str, + **kwargs: Any + ) -> None: + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + super(StorageCacheManagementClientConfiguration, self).__init__(**kwargs) + + self.credential = credential + self.subscription_id = subscription_id + self.api_version = "2020-03-01" + self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) + kwargs.setdefault('sdk_moniker', 'storagecachemanagementclient/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ) -> None: + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or 
policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_storage_cache_management_client.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_storage_cache_management_client.py new file mode 100644 index 00000000000..2276716af3d --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_storage_cache_management_client.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any, Optional, TYPE_CHECKING + +from azure.mgmt.core import AsyncARMPipelineClient +from msrest import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + +from ._configuration import StorageCacheManagementClientConfiguration +from .operations import OperationOperations +from .operations import SkuOperations +from .operations import UsageModelOperations +from .operations import ASCOperationOperations +from .operations import CacheOperations +from .operations import StorageTargetOperations +from .. import models + + +class StorageCacheManagementClient(object): + """A Storage Cache provides scalable caching service for NAS clients, serving data from either NFSv3 or Blob at-rest storage (referred to as "Storage Targets"). These operations allow you to manage Caches. + + :ivar operation: OperationOperations operations + :vartype operation: storage_cache_management_client.aio.operations.OperationOperations + :ivar sku: SkuOperations operations + :vartype sku: storage_cache_management_client.aio.operations.SkuOperations + :ivar usage_model: UsageModelOperations operations + :vartype usage_model: storage_cache_management_client.aio.operations.UsageModelOperations + :ivar asc_operation: ASCOperationOperations operations + :vartype asc_operation: storage_cache_management_client.aio.operations.ASCOperationOperations + :ivar cache: CacheOperations operations + :vartype cache: storage_cache_management_client.aio.operations.CacheOperations + :ivar storage_target: StorageTargetOperations operations + :vartype storage_target: storage_cache_management_client.aio.operations.StorageTargetOperations + :param credential: Credential needed for the client to connect to Azure. 
+ :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. + :type subscription_id: str + :param str base_url: Service URL + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + subscription_id: str, + base_url: Optional[str] = None, + **kwargs: Any + ) -> None: + if not base_url: + base_url = 'https://management.azure.com' + self._config = StorageCacheManagementClientConfiguration(credential, subscription_id, **kwargs) + self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.operation = OperationOperations( + self._client, self._config, self._serialize, self._deserialize) + self.sku = SkuOperations( + self._client, self._config, self._serialize, self._deserialize) + self.usage_model = UsageModelOperations( + self._client, self._config, self._serialize, self._deserialize) + self.asc_operation = ASCOperationOperations( + self._client, self._config, self._serialize, self._deserialize) + self.cache = CacheOperations( + self._client, self._config, self._serialize, self._deserialize) + self.storage_target = StorageTargetOperations( + self._client, self._config, self._serialize, self._deserialize) + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "StorageCacheManagementClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details) -> None: + await self._client.__aexit__(*exc_details) diff --git 
a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/__init__.py new file mode 100644 index 00000000000..52d521bf575 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operation_operations import OperationOperations +from ._sku_operations import SkuOperations +from ._usage_model_operations import UsageModelOperations +from ._asc_operation_operations import ASCOperationOperations +from ._cache_operations import CacheOperations +from ._storage_target_operations import StorageTargetOperations + +__all__ = [ + 'OperationOperations', + 'SkuOperations', + 'UsageModelOperations', + 'ASCOperationOperations', + 'CacheOperations', + 'StorageTargetOperations', +] diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_asc_operation_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_asc_operation_operations.py new file mode 100644 index 00000000000..e52cf0e271b --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_asc_operation_operations.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class ASCOperationOperations: + """ASCOperationOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def get( + self, + location: str, + operation_id: str, + **kwargs + ) -> "models.ASCOperation": + """Gets the status of an asynchronous operation for the Azure HPC cache. + + :param location: The region name which the operation will lookup into. + :type location: str + :param operation_id: The operation id which uniquely identifies the asynchronous operation. 
+ :type operation_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ASCOperation, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.ASCOperation + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ASCOperation"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'location': self._serialize.url("location", location, 'str'), + 'operationId': self._serialize.url("operation_id", operation_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('ASCOperation', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': 
'/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/locations/{location}/ascOperations/{operationId}'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_cache_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_cache_operations.py new file mode 100644 index 00000000000..fed61f8eb8b --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_cache_operations.py @@ -0,0 +1,1142 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class CacheOperations: + """CacheOperations async operations. + + You should not instantiate this class directly. 
Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs + ) -> AsyncIterable["models.CachesListResult"]: + """Returns all Caches the user has access to under a subscription. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either CachesListResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.CachesListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CachesListResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + 
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('CachesListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/caches'} # type: ignore + + def list_by_resource_group( + self, + resource_group_name: str, + **kwargs + ) -> AsyncIterable["models.CachesListResult"]: + """Returns all Caches the user has access to under a resource group. + + :param resource_group_name: Target resource group. 
+ :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either CachesListResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.CachesListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CachesListResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list_by_resource_group.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('CachesListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = 
prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches'} # type: ignore + + async def _delete_initial( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._delete_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def begin_delete( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> AsyncLROPoller[object]: + """Schedules a Cache for deletion. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + + if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def 
get( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> "models.Cache": + """Returns a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Cache, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.Cache + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def _create_or_update_initial( + self, + resource_group_name: str, + cache_name: str, + tags: Optional[object] = None, + location: Optional[str] = None, + cache_size_gb: Optional[int] = None, + provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None, + subnet: Optional[str] = None, + upgrade_status: Optional["models.CacheUpgradeStatus"] = None, + root_squash: Optional[bool] = None, + key_encryption_key: Optional["models.KeyVaultKeyReference"] = None, + mtu: Optional[int] = 1500, + name: Optional[str] = None, + type: Optional[Union[str, "models.CacheIdentityType"]] = None, + **kwargs + ) -> "models.Cache": + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + cache = models.Cache(tags=tags, location=location, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, root_squash=root_squash, key_encryption_key=key_encryption_key, mtu=mtu, name_sku_name=name, type_identity_type=type) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self._create_or_update_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 
'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if cache is not None: + body_content = self._serialize.body(cache, 'Cache') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('Cache', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('Cache', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def begin_create_or_update( + self, + resource_group_name: str, + cache_name: str, + tags: Optional[object] = None, + location: Optional[str] = None, + cache_size_gb: Optional[int] 
= None, + provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None, + subnet: Optional[str] = None, + upgrade_status: Optional["models.CacheUpgradeStatus"] = None, + root_squash: Optional[bool] = None, + key_encryption_key: Optional["models.KeyVaultKeyReference"] = None, + mtu: Optional[int] = 1500, + name: Optional[str] = None, + type: Optional[Union[str, "models.CacheIdentityType"]] = None, + **kwargs + ) -> AsyncLROPoller["models.Cache"]: + """Create or update a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param tags: ARM tags as name/value pairs. + :type tags: object + :param location: Region name string. + :type location: str + :param cache_size_gb: The size of this Cache, in GB. + :type cache_size_gb: int + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. + :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param root_squash: root squash of cache property. + :type root_squash: bool + :param key_encryption_key: Specifies the location of the key encryption key in Key Vault. + :type key_encryption_key: ~storage_cache_management_client.models.KeyVaultKeyReference + :param mtu: The IPv4 maximum transmission unit configured for the subnet. + :type mtu: int + :param name: SKU name for this Cache. + :type name: str + :param type: The type of identity used for the cache. 
+ :type type: str or ~storage_cache_management_client.models.CacheIdentityType + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Cache or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~storage_cache_management_client.models.Cache] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + tags=tags, + location=location, + cache_size_gb=cache_size_gb, + provisioning_state=provisioning_state, + subnet=subnet, + upgrade_status=upgrade_status, + root_squash=root_squash, + key_encryption_key=key_encryption_key, + mtu=mtu, + name=name, + type=type, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': 
self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def update( + self, + resource_group_name: str, + cache_name: str, + tags: Optional[object] = None, + location: Optional[str] = None, + cache_size_gb: Optional[int] = None, + provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None, + subnet: Optional[str] = None, + upgrade_status: Optional["models.CacheUpgradeStatus"] = None, + root_squash: Optional[bool] = None, + key_encryption_key: Optional["models.KeyVaultKeyReference"] = None, + mtu: Optional[int] = 1500, + name: Optional[str] = None, + type: Optional[Union[str, "models.CacheIdentityType"]] = None, + **kwargs + ) -> "models.Cache": + """Update a Cache instance. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param tags: ARM tags as name/value pairs. + :type tags: object + :param location: Region name string. + :type location: str + :param cache_size_gb: The size of this Cache, in GB. 
+ :type cache_size_gb: int + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. + :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param root_squash: root squash of cache property. + :type root_squash: bool + :param key_encryption_key: Specifies the location of the key encryption key in Key Vault. + :type key_encryption_key: ~storage_cache_management_client.models.KeyVaultKeyReference + :param mtu: The IPv4 maximum transmission unit configured for the subnet. + :type mtu: int + :param name: SKU name for this Cache. + :type name: str + :param type: The type of identity used for the cache. + :type type: str or ~storage_cache_management_client.models.CacheIdentityType + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Cache, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.Cache + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + cache = models.Cache(tags=tags, location=location, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, root_squash=root_squash, key_encryption_key=key_encryption_key, mtu=mtu, name_sku_name=name, type_identity_type=type) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update.metadata['url'] # type: ignore + 
path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if cache is not None: + body_content = self._serialize.body(cache, 'Cache') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def _flush_initial( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: 
ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._flush_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _flush_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'} # type: ignore + + async def begin_flush( + self, + 
resource_group_name: str, + cache_name: str, + **kwargs + ) -> AsyncLROPoller[object]: + """Tells a Cache to write all dirty data to the Storage Target(s). During the flush, clients will + see errors returned until the flush is complete. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = await self._flush_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': 
self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_flush.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'} # type: ignore + + async def _start_initial( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._start_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + 
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'} # type: ignore + + async def begin_start( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> AsyncLROPoller[object]: + """Tells a Stopped state Cache to transition to Active state. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
+ :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = await self._start_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + 
continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'} # type: ignore + + async def _stop_initial( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._stop_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if 
response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'} # type: ignore + + async def begin_stop( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> AsyncLROPoller[object]: + """Tells an Active Cache to transition to Stopped state. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = await self._stop_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_stop.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'} # type: ignore + + async def _upgrade_firmware_initial( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._upgrade_firmware_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 201: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: 
+ deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _upgrade_firmware_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'} # type: ignore + + async def begin_upgrade_firmware( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> AsyncLROPoller[object]: + """Upgrade a Cache's firmware if a new version is available. Otherwise, this operation has no + effect. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = await self._upgrade_firmware_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_upgrade_firmware.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_operation_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_operation_operations.py new file mode 100644 index 00000000000..63e6ddc50db --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_operation_operations.py @@ -0,0 +1,104 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class OperationOperations: + """OperationOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. 
+ :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs + ) -> AsyncIterable["models.ApiOperationListResult"]: + """Lists all of the available Resource Provider operations. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either ApiOperationListResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.ApiOperationListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ApiOperationListResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def 
extract_data(pipeline_response): + deserialized = self._deserialize('ApiOperationListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/providers/Microsoft.StorageCache/operations'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_sku_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_sku_operations.py new file mode 100644 index 00000000000..1ac40a15376 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_sku_operations.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class SkuOperations: + """SkuOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs + ) -> AsyncIterable["models.ResourceSkusResult"]: + """Get the list of StorageCache.Cache SKUs available to this subscription. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either ResourceSkusResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.ResourceSkusResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ResourceSkusResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('ResourceSkusResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/skus'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_storage_target_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_storage_target_operations.py new file mode 100644 index 00000000000..dabfcbedd2f --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_storage_target_operations.py @@ -0,0 +1,489 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... 
import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class StorageTargetOperations: + """StorageTargetOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list_by_cache( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> AsyncIterable["models.StorageTargetsResult"]: + """Returns a list of Storage Targets for the specified Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. 
+ :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either StorageTargetsResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.StorageTargetsResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTargetsResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list_by_cache.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('StorageTargetsResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return 
deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list_by_cache.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets'} # type: ignore + + async def _delete_initial( + self, + resource_group_name: str, + cache_name: str, + storage_target_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._delete_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, 
Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + async def begin_delete( + self, + resource_group_name: str, + cache_name: str, + storage_target_name: str, + **kwargs + ) -> AsyncLROPoller[object]: + """Removes a Storage Target from a Cache. This operation is allowed at any time, but if the Cache + is down or unhealthy, the actual removal of the Storage Target may be delayed until the Cache + is healthy again. Note that if the Cache has data to flush to the Storage Target, the data will + be flushed before the Storage Target will be deleted. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of Storage Target. 
+ :type storage_target_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + 
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + async def get( + self, + resource_group_name: str, + cache_name: str, + storage_target_name: str, + **kwargs + ) -> "models.StorageTarget": + """Returns a Storage Target from a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of the Storage Target. Length of name must be not greater than + 80 and chars must be in list of [-0-9a-zA-Z_] char class. 
+ :type storage_target_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageTarget, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.StorageTarget + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: 
+ return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + async def _create_or_update_initial( + self, + resource_group_name: str, + cache_name: str, + storage_target_name: str, + junctions: Optional[List["models.NamespaceJunction"]] = None, + target_type: Optional[Union[str, "models.StorageTargetType"]] = None, + provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None, + nfs3: Optional["models.Nfs3Target"] = None, + unknown_map: Optional[Dict[str, str]] = None, + target: Optional[str] = None, + **kwargs + ) -> "models.StorageTarget": + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + storagetarget = models.StorageTarget(junctions=junctions, target_type=target_type, provisioning_state=provisioning_state, nfs3=nfs3, unknown_map=unknown_map, target=target) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self._create_or_update_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, 
Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if storagetarget is not None: + body_content = self._serialize.body(storagetarget, 'StorageTarget') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + async def begin_create_or_update( + self, + resource_group_name: str, + cache_name: str, + storage_target_name: str, + junctions: Optional[List["models.NamespaceJunction"]] = None, + target_type: Optional[Union[str, "models.StorageTargetType"]] = None, + provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None, + nfs3: 
Optional["models.Nfs3Target"] = None, + unknown_map: Optional[Dict[str, str]] = None, + target: Optional[str] = None, + **kwargs + ) -> AsyncLROPoller["models.StorageTarget"]: + """Create or update a Storage Target. This operation is allowed at any time, but if the Cache is + down or unhealthy, the actual creation/modification of the Storage Target may be delayed until + the Cache is healthy again. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of the Storage Target. Length of name must be not greater than + 80 and chars must be in list of [-0-9a-zA-Z_] char class. + :type storage_target_name: str + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param unknown_map: Dictionary of string->string pairs containing information about the Storage + Target. + :type unknown_map: dict[str, str] + :param target: Resource ID of storage container. + :type target: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
+ :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either StorageTarget or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~storage_cache_management_client.models.StorageTarget] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + junctions=junctions, + target_type=target_type, + provisioning_state=provisioning_state, + nfs3=nfs3, + unknown_map=unknown_map, + target=target, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', 
pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_usage_model_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_usage_model_operations.py new file mode 100644 index 00000000000..b986a847bce --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_usage_model_operations.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class UsageModelOperations: + """UsageModelOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs + ) -> AsyncIterable["models.UsageModelsResult"]: + """Get the list of Cache Usage Models available to this subscription. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either UsageModelsResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.UsageModelsResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.UsageModelsResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('UsageModelsResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/usageModels'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/__init__.py new file mode 100644 index 00000000000..7a9f6249f57 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/__init__.py @@ -0,0 +1,137 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +try: + from ._models_py3 import ASCOperation + from ._models_py3 import ApiOperation + from ._models_py3 import ApiOperationDisplay + from ._models_py3 import ApiOperationListResult + from ._models_py3 import ApiOperationPropertiesServiceSpecification + from ._models_py3 import Cache + from ._models_py3 import CacheHealth + from ._models_py3 import CacheUpgradeStatus + from ._models_py3 import CachesListResult + from ._models_py3 import ClfsTarget + from ._models_py3 import ClfsTargetProperties + from ._models_py3 import CloudErrorBody + from ._models_py3 import ErrorResponse + from ._models_py3 import KeyVaultKeyReference + from ._models_py3 import KeyVaultKeyReferenceSourceVault + from ._models_py3 import MetricDimension + from ._models_py3 import MetricSpecification + from ._models_py3 import NamespaceJunction + from ._models_py3 import Nfs3Target + from ._models_py3 import Nfs3TargetProperties + from ._models_py3 import ResourceSku + from ._models_py3 import ResourceSkuCapabilities + from ._models_py3 import ResourceSkuLocationInfo + from ._models_py3 import ResourceSkusResult + from ._models_py3 import Restriction + from ._models_py3 import StorageTarget + from ._models_py3 import StorageTargetProperties + from ._models_py3 import StorageTargetResource + from ._models_py3 import StorageTargetsResult + from ._models_py3 import SystemData + from ._models_py3 import UnknownTarget + from ._models_py3 import UnknownTargetProperties + from ._models_py3 import UsageModel + from ._models_py3 import UsageModelDisplay + from ._models_py3 import UsageModelsResult +except (SyntaxError, ImportError): + from ._models import ASCOperation # type: ignore + from ._models import ApiOperation # type: ignore + from ._models import ApiOperationDisplay # type: ignore + from ._models import ApiOperationListResult # type: ignore + from ._models import ApiOperationPropertiesServiceSpecification # type: 
ignore + from ._models import Cache # type: ignore + from ._models import CacheHealth # type: ignore + from ._models import CacheUpgradeStatus # type: ignore + from ._models import CachesListResult # type: ignore + from ._models import ClfsTarget # type: ignore + from ._models import ClfsTargetProperties # type: ignore + from ._models import CloudErrorBody # type: ignore + from ._models import ErrorResponse # type: ignore + from ._models import KeyVaultKeyReference # type: ignore + from ._models import KeyVaultKeyReferenceSourceVault # type: ignore + from ._models import MetricDimension # type: ignore + from ._models import MetricSpecification # type: ignore + from ._models import NamespaceJunction # type: ignore + from ._models import Nfs3Target # type: ignore + from ._models import Nfs3TargetProperties # type: ignore + from ._models import ResourceSku # type: ignore + from ._models import ResourceSkuCapabilities # type: ignore + from ._models import ResourceSkuLocationInfo # type: ignore + from ._models import ResourceSkusResult # type: ignore + from ._models import Restriction # type: ignore + from ._models import StorageTarget # type: ignore + from ._models import StorageTargetProperties # type: ignore + from ._models import StorageTargetResource # type: ignore + from ._models import StorageTargetsResult # type: ignore + from ._models import SystemData # type: ignore + from ._models import UnknownTarget # type: ignore + from ._models import UnknownTargetProperties # type: ignore + from ._models import UsageModel # type: ignore + from ._models import UsageModelDisplay # type: ignore + from ._models import UsageModelsResult # type: ignore + +from ._storage_cache_management_client_enums import ( + CacheIdentityType, + CreatedByType, + FirmwareStatusType, + HealthStateType, + MetricAggregationType, + ProvisioningStateType, + ReasonCode, + StorageTargetType, +) + +__all__ = [ + 'ASCOperation', + 'ApiOperation', + 'ApiOperationDisplay', + 'ApiOperationListResult', + 
'ApiOperationPropertiesServiceSpecification', + 'Cache', + 'CacheHealth', + 'CacheUpgradeStatus', + 'CachesListResult', + 'ClfsTarget', + 'ClfsTargetProperties', + 'CloudErrorBody', + 'ErrorResponse', + 'KeyVaultKeyReference', + 'KeyVaultKeyReferenceSourceVault', + 'MetricDimension', + 'MetricSpecification', + 'NamespaceJunction', + 'Nfs3Target', + 'Nfs3TargetProperties', + 'ResourceSku', + 'ResourceSkuCapabilities', + 'ResourceSkuLocationInfo', + 'ResourceSkusResult', + 'Restriction', + 'StorageTarget', + 'StorageTargetProperties', + 'StorageTargetResource', + 'StorageTargetsResult', + 'SystemData', + 'UnknownTarget', + 'UnknownTargetProperties', + 'UsageModel', + 'UsageModelDisplay', + 'UsageModelsResult', + 'CacheIdentityType', + 'CreatedByType', + 'FirmwareStatusType', + 'HealthStateType', + 'MetricAggregationType', + 'ProvisioningStateType', + 'ReasonCode', + 'StorageTargetType', +] diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models.py new file mode 100644 index 00000000000..6724aeb9f57 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models.py @@ -0,0 +1,1243 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import msrest.serialization + + +class ApiOperation(msrest.serialization.Model): + """REST API operation description: see https://github.com/Azure/azure-rest-api-specs/blob/master/documentation/openapi-authoring-automated-guidelines.md#r3023-operationsapiimplementation. 
+ + :param display: The object that represents the operation. + :type display: ~storage_cache_management_client.models.ApiOperationDisplay + :param origin: Origin of the operation. + :type origin: str + :param is_data_action: The flag that indicates whether the operation applies to data plane. + :type is_data_action: bool + :param name: Operation name: {provider}/{resource}/{operation}. + :type name: str + :param service_specification: Specification of the all the metrics provided for a resource + type. + :type service_specification: + ~storage_cache_management_client.models.ApiOperationPropertiesServiceSpecification + """ + + _attribute_map = { + 'display': {'key': 'display', 'type': 'ApiOperationDisplay'}, + 'origin': {'key': 'origin', 'type': 'str'}, + 'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, + 'name': {'key': 'name', 'type': 'str'}, + 'service_specification': {'key': 'properties.serviceSpecification', 'type': 'ApiOperationPropertiesServiceSpecification'}, + } + + def __init__( + self, + **kwargs + ): + super(ApiOperation, self).__init__(**kwargs) + self.display = kwargs.get('display', None) + self.origin = kwargs.get('origin', None) + self.is_data_action = kwargs.get('is_data_action', None) + self.name = kwargs.get('name', None) + self.service_specification = kwargs.get('service_specification', None) + + +class ApiOperationDisplay(msrest.serialization.Model): + """The object that represents the operation. + + :param operation: Operation type: Read, write, delete, etc. + :type operation: str + :param provider: Service provider: Microsoft.StorageCache. + :type provider: str + :param resource: Resource on which the operation is performed: Cache, etc. + :type resource: str + :param description: The description of the operation. 
+ :type description: str + """ + + _attribute_map = { + 'operation': {'key': 'operation', 'type': 'str'}, + 'provider': {'key': 'provider', 'type': 'str'}, + 'resource': {'key': 'resource', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ApiOperationDisplay, self).__init__(**kwargs) + self.operation = kwargs.get('operation', None) + self.provider = kwargs.get('provider', None) + self.resource = kwargs.get('resource', None) + self.description = kwargs.get('description', None) + + +class ApiOperationListResult(msrest.serialization.Model): + """Result of the request to list Resource Provider operations. It contains a list of operations and a URL link to get the next set of results. + + :param next_link: URL to get the next set of operation list results if there are any. + :type next_link: str + :param value: List of Resource Provider operations supported by the Microsoft.StorageCache + resource provider. + :type value: list[~storage_cache_management_client.models.ApiOperation] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[ApiOperation]'}, + } + + def __init__( + self, + **kwargs + ): + super(ApiOperationListResult, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + self.value = kwargs.get('value', None) + + +class ApiOperationPropertiesServiceSpecification(msrest.serialization.Model): + """Specification of the all the metrics provided for a resource type. + + :param metric_specifications: Details about operations related to metrics. 
+ :type metric_specifications: list[~storage_cache_management_client.models.MetricSpecification] + """ + + _attribute_map = { + 'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'}, + } + + def __init__( + self, + **kwargs + ): + super(ApiOperationPropertiesServiceSpecification, self).__init__(**kwargs) + self.metric_specifications = kwargs.get('metric_specifications', None) + + +class ASCOperation(msrest.serialization.Model): + """The status of operation. + + :param id: The operation Id. + :type id: str + :param name: The operation name. + :type name: str + :param start_time: The start time of the operation. + :type start_time: str + :param end_time: The end time of the operation. + :type end_time: str + :param status: The status of the operation. + :type status: str + :param error: The error detail of the operation if any. + :type error: ~storage_cache_management_client.models.ErrorResponse + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'str'}, + 'end_time': {'key': 'endTime', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'ErrorResponse'}, + } + + def __init__( + self, + **kwargs + ): + super(ASCOperation, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.name = kwargs.get('name', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.status = kwargs.get('status', None) + self.error = kwargs.get('error', None) + + +class Cache(msrest.serialization.Model): + """A Cache instance. Follows Azure Resource Manager standards: https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/resource-api-reference.md. + + Variables are only populated by the server, and will be ignored when sending a request. + + :param tags: A set of tags. ARM tags as name/value pairs. 
+ :type tags: object + :ivar id: Resource ID of the Cache. + :vartype id: str + :param location: Region name string. + :type location: str + :ivar name: Name of Cache. + :vartype name: str + :ivar type: Type of the Cache; Microsoft.StorageCache/Cache. + :vartype type: str + :ivar system_data: The system meta data relating to this resource. + :vartype system_data: ~storage_cache_management_client.models.SystemData + :param cache_size_gb: The size of this Cache, in GB. + :type cache_size_gb: int + :ivar health: Health of the Cache. + :vartype health: ~storage_cache_management_client.models.CacheHealth + :ivar mount_addresses: Array of IP addresses that can be used by clients mounting this Cache. + :vartype mount_addresses: list[str] + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. + :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param root_squash: root squash of cache property. + :type root_squash: bool + :param key_encryption_key: Specifies the location of the key encryption key in Key Vault. + :type key_encryption_key: ~storage_cache_management_client.models.KeyVaultKeyReference + :param mtu: The IPv4 maximum transmission unit configured for the subnet. + :type mtu: int + :ivar utility_addresses: Array of additional IP addresses used by this Cache. + :vartype utility_addresses: list[str] + :param name_sku_name: SKU name for this Cache. + :type name_sku_name: str + :ivar principal_id: The principal id of the cache. + :vartype principal_id: str + :ivar tenant_id: The tenant id associated with the cache. 
+ :vartype tenant_id: str + :param type_identity_type: The type of identity used for the cache. Possible values include: + "SystemAssigned", "None". + :type type_identity_type: str or ~storage_cache_management_client.models.CacheIdentityType + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'type': {'readonly': True}, + 'system_data': {'readonly': True}, + 'health': {'readonly': True}, + 'mount_addresses': {'readonly': True}, + 'mtu': {'maximum': 1500, 'minimum': 576}, + 'utility_addresses': {'readonly': True}, + 'principal_id': {'readonly': True}, + 'tenant_id': {'readonly': True}, + } + + _attribute_map = { + 'tags': {'key': 'tags', 'type': 'object'}, + 'id': {'key': 'id', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, + 'cache_size_gb': {'key': 'properties.cacheSizeGB', 'type': 'int'}, + 'health': {'key': 'properties.health', 'type': 'CacheHealth'}, + 'mount_addresses': {'key': 'properties.mountAddresses', 'type': '[str]'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'subnet': {'key': 'properties.subnet', 'type': 'str'}, + 'upgrade_status': {'key': 'properties.upgradeStatus', 'type': 'CacheUpgradeStatus'}, + 'root_squash': {'key': 'securitySettings.rootSquash', 'type': 'bool'}, + 'key_encryption_key': {'key': 'encryptionSettings.keyEncryptionKey', 'type': 'KeyVaultKeyReference'}, + 'mtu': {'key': 'networkSettings.mtu', 'type': 'int'}, + 'utility_addresses': {'key': 'networkSettings.utilityAddresses', 'type': '[str]'}, + 'name_sku_name': {'key': 'sku.name', 'type': 'str'}, + 'principal_id': {'key': 'identity.principalId', 'type': 'str'}, + 'tenant_id': {'key': 'identity.tenantId', 'type': 'str'}, + 'type_identity_type': {'key': 'identity.type', 'type': 'str'}, + } + + def __init__( + self, + 
**kwargs + ): + super(Cache, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.id = None + self.location = kwargs.get('location', None) + self.name = None + self.type = None + self.system_data = None + self.cache_size_gb = kwargs.get('cache_size_gb', None) + self.health = None + self.mount_addresses = None + self.provisioning_state = kwargs.get('provisioning_state', None) + self.subnet = kwargs.get('subnet', None) + self.upgrade_status = kwargs.get('upgrade_status', None) + self.root_squash = kwargs.get('root_squash', None) + self.key_encryption_key = kwargs.get('key_encryption_key', None) + self.mtu = kwargs.get('mtu', 1500) + self.utility_addresses = None + self.name_sku_name = kwargs.get('name_sku_name', None) + self.principal_id = None + self.tenant_id = None + self.type_identity_type = kwargs.get('type_identity_type', None) + + +class CacheHealth(msrest.serialization.Model): + """An indication of Cache health. Gives more information about health than just that related to provisioning. + + :param state: List of Cache health states. Possible values include: "Unknown", "Healthy", + "Degraded", "Down", "Transitioning", "Stopping", "Stopped", "Upgrading", "Flushing". + :type state: str or ~storage_cache_management_client.models.HealthStateType + :param status_description: Describes explanation of state. + :type status_description: str + """ + + _attribute_map = { + 'state': {'key': 'state', 'type': 'str'}, + 'status_description': {'key': 'statusDescription', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(CacheHealth, self).__init__(**kwargs) + self.state = kwargs.get('state', None) + self.status_description = kwargs.get('status_description', None) + + +class CachesListResult(msrest.serialization.Model): + """Result of the request to list Caches. It contains a list of Caches and a URL link to get the next set of results. + + :param next_link: URL to get the next set of Cache list results, if there are any. 
+ :type next_link: str + :param value: List of Caches. + :type value: list[~storage_cache_management_client.models.Cache] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[Cache]'}, + } + + def __init__( + self, + **kwargs + ): + super(CachesListResult, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + self.value = kwargs.get('value', None) + + +class CacheUpgradeStatus(msrest.serialization.Model): + """Properties describing the software upgrade state of the Cache. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar current_firmware_version: Version string of the firmware currently installed on this + Cache. + :vartype current_firmware_version: str + :ivar firmware_update_status: True if there is a firmware update ready to install on this + Cache. The firmware will automatically be installed after firmwareUpdateDeadline if not + triggered earlier via the upgrade operation. Possible values include: "available", + "unavailable". + :vartype firmware_update_status: str or + ~storage_cache_management_client.models.FirmwareStatusType + :ivar firmware_update_deadline: Time at which the pending firmware update will automatically be + installed on the Cache. + :vartype firmware_update_deadline: ~datetime.datetime + :ivar last_firmware_update: Time of the last successful firmware update. + :vartype last_firmware_update: ~datetime.datetime + :ivar pending_firmware_version: When firmwareUpdateAvailable is true, this field holds the + version string for the update. 
+ :vartype pending_firmware_version: str + """ + + _validation = { + 'current_firmware_version': {'readonly': True}, + 'firmware_update_status': {'readonly': True}, + 'firmware_update_deadline': {'readonly': True}, + 'last_firmware_update': {'readonly': True}, + 'pending_firmware_version': {'readonly': True}, + } + + _attribute_map = { + 'current_firmware_version': {'key': 'currentFirmwareVersion', 'type': 'str'}, + 'firmware_update_status': {'key': 'firmwareUpdateStatus', 'type': 'str'}, + 'firmware_update_deadline': {'key': 'firmwareUpdateDeadline', 'type': 'iso-8601'}, + 'last_firmware_update': {'key': 'lastFirmwareUpdate', 'type': 'iso-8601'}, + 'pending_firmware_version': {'key': 'pendingFirmwareVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(CacheUpgradeStatus, self).__init__(**kwargs) + self.current_firmware_version = None + self.firmware_update_status = None + self.firmware_update_deadline = None + self.last_firmware_update = None + self.pending_firmware_version = None + + +class ClfsTarget(msrest.serialization.Model): + """Properties pertained to ClfsTarget. + + :param target: Resource ID of storage container. + :type target: str + """ + + _attribute_map = { + 'target': {'key': 'target', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ClfsTarget, self).__init__(**kwargs) + self.target = kwargs.get('target', None) + + +class StorageTargetProperties(msrest.serialization.Model): + """Properties of the Storage Target. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ClfsTargetProperties, Nfs3TargetProperties, UnknownTargetProperties. + + All required parameters must be populated in order to send to Azure. + + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Required. 
Type of the Storage Target.Constant filled by server. Possible + values include: "nfs3", "clfs", "unknown". + :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_type': {'required': True}, + } + + _attribute_map = { + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + _subtype_map = { + 'target_type': {'clfs': 'ClfsTargetProperties', 'nfs3': 'Nfs3TargetProperties', 'unknown': 'UnknownTargetProperties'} + } + + def __init__( + self, + **kwargs + ): + super(StorageTargetProperties, self).__init__(**kwargs) + self.junctions = kwargs.get('junctions', None) + self.target_type = None # type: Optional[str] + self.provisioning_state = kwargs.get('provisioning_state', None) + self.nfs3 = kwargs.get('nfs3', None) + self.clfs = kwargs.get('clfs', None) + self.unknown = kwargs.get('unknown', None) + + +class ClfsTargetProperties(StorageTargetProperties): + """Storage container for use as a CLFS Storage 
Target. + + All required parameters must be populated in order to send to Azure. + + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Required. Type of the Storage Target.Constant filled by server. Possible + values include: "nfs3", "clfs", "unknown". + :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_type': {'required': True}, + } + + _attribute_map = { + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + **kwargs + ): + super(ClfsTargetProperties, self).__init__(**kwargs) + self.target_type = 'clfs' # type: str + + +class CloudErrorBody(msrest.serialization.Model): + """An error response. + + :param code: An identifier for the error. Codes are invariant and are intended to be consumed + programmatically. 
+ :type code: str + :param details: A list of additional details about the error. + :type details: list[~storage_cache_management_client.models.CloudErrorBody] + :param message: A message describing the error, intended to be suitable for display in a user + interface. + :type message: str + :param target: The target of the particular error. For example, the name of the property in + error. + :type target: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[CloudErrorBody]'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(CloudErrorBody, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.details = kwargs.get('details', None) + self.message = kwargs.get('message', None) + self.target = kwargs.get('target', None) + + +class ErrorResponse(msrest.serialization.Model): + """Describes the format of Error response. + + :param code: Error code. + :type code: str + :param message: Error message indicating why the operation failed. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ErrorResponse, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + + +class KeyVaultKeyReference(msrest.serialization.Model): + """Describes a reference to Key Vault Key. + + All required parameters must be populated in order to send to Azure. + + :param key_url: Required. The URL referencing a key encryption key in Key Vault. + :type key_url: str + :param source_vault: Required. Describes a resource Id to source Key Vault. 
+ :type source_vault: ~storage_cache_management_client.models.KeyVaultKeyReferenceSourceVault + """ + + _validation = { + 'key_url': {'required': True}, + 'source_vault': {'required': True}, + } + + _attribute_map = { + 'key_url': {'key': 'keyUrl', 'type': 'str'}, + 'source_vault': {'key': 'sourceVault', 'type': 'KeyVaultKeyReferenceSourceVault'}, + } + + def __init__( + self, + **kwargs + ): + super(KeyVaultKeyReference, self).__init__(**kwargs) + self.key_url = kwargs['key_url'] + self.source_vault = kwargs['source_vault'] + + +class KeyVaultKeyReferenceSourceVault(msrest.serialization.Model): + """Describes a resource Id to source Key Vault. + + :param id: Resource Id. + :type id: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(KeyVaultKeyReferenceSourceVault, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + + +class MetricDimension(msrest.serialization.Model): + """Specifications of the Dimension of metrics. + + :param name: Name of the dimension. + :type name: str + :param display_name: Localized friendly display name of the dimension. + :type display_name: str + :param internal_name: Internal name of the dimension. + :type internal_name: str + :param to_be_exported_for_shoebox: To be exported to shoe box. 
+ :type to_be_exported_for_shoebox: bool + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'internal_name': {'key': 'internalName', 'type': 'str'}, + 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(MetricDimension, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.display_name = kwargs.get('display_name', None) + self.internal_name = kwargs.get('internal_name', None) + self.to_be_exported_for_shoebox = kwargs.get('to_be_exported_for_shoebox', None) + + +class MetricSpecification(msrest.serialization.Model): + """Details about operation related to metrics. + + :param name: The name of the metric. + :type name: str + :param display_name: Localized display name of the metric. + :type display_name: str + :param display_description: The description of the metric. + :type display_description: str + :param unit: The unit that the metric is measured in. + :type unit: str + :param aggregation_type: The type of metric aggregation. + :type aggregation_type: str + :param supported_aggregation_types: Support metric aggregation type. + :type supported_aggregation_types: list[str or + ~storage_cache_management_client.models.MetricAggregationType] + :param metric_class: Type of metrics. + :type metric_class: str + :param dimensions: Dimensions of the metric. 
+ :type dimensions: list[~storage_cache_management_client.models.MetricDimension] + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'display_description': {'key': 'displayDescription', 'type': 'str'}, + 'unit': {'key': 'unit', 'type': 'str'}, + 'aggregation_type': {'key': 'aggregationType', 'type': 'str'}, + 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'}, + 'metric_class': {'key': 'metricClass', 'type': 'str'}, + 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'}, + } + + def __init__( + self, + **kwargs + ): + super(MetricSpecification, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.display_name = kwargs.get('display_name', None) + self.display_description = kwargs.get('display_description', None) + self.unit = kwargs.get('unit', None) + self.aggregation_type = kwargs.get('aggregation_type', None) + self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None) + self.metric_class = kwargs.get('metric_class', None) + self.dimensions = kwargs.get('dimensions', None) + + +class NamespaceJunction(msrest.serialization.Model): + """A namespace junction. + + :param namespace_path: Namespace path on a Cache for a Storage Target. + :type namespace_path: str + :param target_path: Path in Storage Target to which namespacePath points. + :type target_path: str + :param nfs_export: NFS export where targetPath exists. 
+ :type nfs_export: str + """ + + _attribute_map = { + 'namespace_path': {'key': 'namespacePath', 'type': 'str'}, + 'target_path': {'key': 'targetPath', 'type': 'str'}, + 'nfs_export': {'key': 'nfsExport', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(NamespaceJunction, self).__init__(**kwargs) + self.namespace_path = kwargs.get('namespace_path', None) + self.target_path = kwargs.get('target_path', None) + self.nfs_export = kwargs.get('nfs_export', None) + + +class Nfs3Target(msrest.serialization.Model): + """Properties pertained to Nfs3Target. + + :param target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44). + :type target: str + :param usage_model: Identifies the primary usage model to be used for this Storage Target. Get + choices from .../usageModels. + :type usage_model: str + """ + + _validation = { + 'target': {'pattern': r'^[-.0-9a-zA-Z]+$'}, + } + + _attribute_map = { + 'target': {'key': 'target', 'type': 'str'}, + 'usage_model': {'key': 'usageModel', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(Nfs3Target, self).__init__(**kwargs) + self.target = kwargs.get('target', None) + self.usage_model = kwargs.get('usage_model', None) + + +class Nfs3TargetProperties(StorageTargetProperties): + """An NFSv3 mount point for use as a Storage Target. + + All required parameters must be populated in order to send to Azure. + + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Required. Type of the Storage Target.Constant filled by server. Possible + values include: "nfs3", "clfs", "unknown". + :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. 
Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_type': {'required': True}, + } + + _attribute_map = { + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + **kwargs + ): + super(Nfs3TargetProperties, self).__init__(**kwargs) + self.target_type = 'nfs3' # type: str + + +class ResourceSku(msrest.serialization.Model): + """A resource SKU. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar resource_type: The type of resource the SKU applies to. + :vartype resource_type: str + :param capabilities: A list of capabilities of this SKU, such as throughput or ops/sec. + :type capabilities: list[~storage_cache_management_client.models.ResourceSkuCapabilities] + :ivar locations: The set of locations that the SKU is available. This will be supported and + registered Azure Geo Regions (e.g., West US, East US, Southeast Asia, etc.). + :vartype locations: list[str] + :param location_info: The set of locations that the SKU is available. + :type location_info: list[~storage_cache_management_client.models.ResourceSkuLocationInfo] + :param name: The name of this SKU. 
+ :type name: str + :param restrictions: The restrictions preventing this SKU from being used. This is empty if + there are no restrictions. + :type restrictions: list[~storage_cache_management_client.models.Restriction] + """ + + _validation = { + 'resource_type': {'readonly': True}, + 'locations': {'readonly': True}, + } + + _attribute_map = { + 'resource_type': {'key': 'resourceType', 'type': 'str'}, + 'capabilities': {'key': 'capabilities', 'type': '[ResourceSkuCapabilities]'}, + 'locations': {'key': 'locations', 'type': '[str]'}, + 'location_info': {'key': 'locationInfo', 'type': '[ResourceSkuLocationInfo]'}, + 'name': {'key': 'name', 'type': 'str'}, + 'restrictions': {'key': 'restrictions', 'type': '[Restriction]'}, + } + + def __init__( + self, + **kwargs + ): + super(ResourceSku, self).__init__(**kwargs) + self.resource_type = None + self.capabilities = kwargs.get('capabilities', None) + self.locations = None + self.location_info = kwargs.get('location_info', None) + self.name = kwargs.get('name', None) + self.restrictions = kwargs.get('restrictions', None) + + +class ResourceSkuCapabilities(msrest.serialization.Model): + """A resource SKU capability. + + :param name: Name of a capability, such as ops/sec. + :type name: str + :param value: Quantity, if the capability is measured by quantity. + :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ResourceSkuCapabilities, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) + + +class ResourceSkuLocationInfo(msrest.serialization.Model): + """Resource SKU location information. + + :param location: Location where this SKU is available. + :type location: str + :param zones: Zones if any. 
+ :type zones: list[str] + """ + + _attribute_map = { + 'location': {'key': 'location', 'type': 'str'}, + 'zones': {'key': 'zones', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(ResourceSkuLocationInfo, self).__init__(**kwargs) + self.location = kwargs.get('location', None) + self.zones = kwargs.get('zones', None) + + +class ResourceSkusResult(msrest.serialization.Model): + """The response from the List Cache SKUs operation. + + Variables are only populated by the server, and will be ignored when sending a request. + + :param next_link: The URI to fetch the next page of Cache SKUs. + :type next_link: str + :ivar value: The list of SKUs available for the subscription. + :vartype value: list[~storage_cache_management_client.models.ResourceSku] + """ + + _validation = { + 'value': {'readonly': True}, + } + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[ResourceSku]'}, + } + + def __init__( + self, + **kwargs + ): + super(ResourceSkusResult, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + self.value = None + + +class Restriction(msrest.serialization.Model): + """The restrictions preventing this SKU from being used. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar type: The type of restrictions. In this version, the only possible value for this is + location. + :vartype type: str + :ivar values: The value of restrictions. If the restriction type is set to location, then this + would be the different locations where the SKU is restricted. + :vartype values: list[str] + :param reason_code: The reason for the restriction. As of now this can be "QuotaId" or + "NotAvailableForSubscription". "QuotaId" is set when the SKU has requiredQuotas parameter as + the subscription does not belong to that quota. "NotAvailableForSubscription" is related to + capacity at the datacenter. 
Possible values include: "QuotaId", "NotAvailableForSubscription". + :type reason_code: str or ~storage_cache_management_client.models.ReasonCode + """ + + _validation = { + 'type': {'readonly': True}, + 'values': {'readonly': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[str]'}, + 'reason_code': {'key': 'reasonCode', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(Restriction, self).__init__(**kwargs) + self.type = None + self.values = None + self.reason_code = kwargs.get('reason_code', None) + + +class StorageTargetResource(msrest.serialization.Model): + """Resource used by a Cache. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: Name of the Storage Target. + :vartype name: str + :ivar id: Resource ID of the Storage Target. + :vartype id: str + :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget. + :vartype type: str + :ivar location: Region name string. + :vartype location: str + :ivar system_data: The system meta data relating to this resource. + :vartype system_data: ~storage_cache_management_client.models.SystemData + """ + + _validation = { + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'id': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'readonly': True}, + 'system_data': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageTargetResource, self).__init__(**kwargs) + self.name = None + self.id = None + self.type = None + self.location = None + self.system_data = None + + +class StorageTarget(StorageTargetResource): + """Type of the Storage Target. 
+ + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: Name of the Storage Target. + :vartype name: str + :ivar id: Resource ID of the Storage Target. + :vartype id: str + :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget. + :vartype type: str + :ivar location: Region name string. + :vartype location: str + :ivar system_data: The system meta data relating to this resource. + :vartype system_data: ~storage_cache_management_client.models.SystemData + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target.Constant filled by server. Possible values + include: "nfs3", "clfs", "unknown". + :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param unknown_map: Dictionary of string->string pairs containing information about the Storage + Target. + :type unknown_map: dict[str, str] + :param target: Resource ID of storage container. 
+ :type target: str + """ + + _validation = { + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'id': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'readonly': True}, + 'system_data': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, + 'junctions': {'key': 'properties.junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'properties.targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'properties.nfs3', 'type': 'Nfs3Target'}, + 'unknown_map': {'key': 'unknown.unknownMap', 'type': '{str}'}, + 'target': {'key': 'clfs.target', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageTarget, self).__init__(**kwargs) + self.junctions = kwargs.get('junctions', None) + self.target_type = None # type: Optional[str] + self.provisioning_state = kwargs.get('provisioning_state', None) + self.nfs3 = kwargs.get('nfs3', None) + self.unknown_map = kwargs.get('unknown_map', None) + self.target = kwargs.get('target', None) + + +class StorageTargetsResult(msrest.serialization.Model): + """A list of Storage Targets. + + :param next_link: The URI to fetch the next page of Storage Targets. + :type next_link: str + :param value: The list of Storage Targets defined for the Cache. 
+ :type value: list[~storage_cache_management_client.models.StorageTarget]
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'value': {'key': 'value', 'type': '[StorageTarget]'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(StorageTargetsResult, self).__init__(**kwargs)
+ self.next_link = kwargs.get('next_link', None)
+ self.value = kwargs.get('value', None)
+
+
+class SystemData(msrest.serialization.Model):
+ """Metadata pertaining to creation and last modification of the resource.
+
+ :param created_by: The identity that created the resource.
+ :type created_by: str
+ :param created_by_type: The type of identity that created the resource. Possible values
+ include: "User", "Application", "ManagedIdentity", "Key".
+ :type created_by_type: str or ~storage_cache_management_client.models.CreatedByType
+ :param created_at: The timestamp of resource creation (UTC).
+ :type created_at: ~datetime.datetime
+ :param last_modified_by: The identity that last modified the resource.
+ :type last_modified_by: str
+ :param last_modified_by_type: The type of identity that last modified the resource. Possible
+ values include: "User", "Application", "ManagedIdentity", "Key".
+ :type last_modified_by_type: str or ~storage_cache_management_client.models.CreatedByType
+ :param last_modified_at: The timestamp of resource last modification (UTC).
+ :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_by': {'key': 'createdBy', 'type': 'str'}, + 'created_by_type': {'key': 'createdByType', 'type': 'str'}, + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, + 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + **kwargs + ): + super(SystemData, self).__init__(**kwargs) + self.created_by = kwargs.get('created_by', None) + self.created_by_type = kwargs.get('created_by_type', None) + self.created_at = kwargs.get('created_at', None) + self.last_modified_by = kwargs.get('last_modified_by', None) + self.last_modified_by_type = kwargs.get('last_modified_by_type', None) + self.last_modified_at = kwargs.get('last_modified_at', None) + + +class UnknownTarget(msrest.serialization.Model): + """Properties pertained to UnknownTarget. + + :param unknown_map: Dictionary of string->string pairs containing information about the Storage + Target. + :type unknown_map: dict[str, str] + """ + + _attribute_map = { + 'unknown_map': {'key': 'unknownMap', 'type': '{str}'}, + } + + def __init__( + self, + **kwargs + ): + super(UnknownTarget, self).__init__(**kwargs) + self.unknown_map = kwargs.get('unknown_map', None) + + +class UnknownTargetProperties(StorageTargetProperties): + """Storage container for use as an Unknown Storage Target. + + All required parameters must be populated in order to send to Azure. + + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Required. Type of the Storage Target.Constant filled by server. Possible + values include: "nfs3", "clfs", "unknown". 
+ :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_type': {'required': True}, + } + + _attribute_map = { + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + **kwargs + ): + super(UnknownTargetProperties, self).__init__(**kwargs) + self.target_type = 'unknown' # type: str + + +class UsageModel(msrest.serialization.Model): + """A usage model. + + :param display: Localized information describing this usage model. + :type display: ~storage_cache_management_client.models.UsageModelDisplay + :param model_name: Non-localized keyword name for this usage model. + :type model_name: str + :param target_type: The type of Storage Target to which this model is applicable (only nfs3 as + of this version). 
+ :type target_type: str + """ + + _attribute_map = { + 'display': {'key': 'display', 'type': 'UsageModelDisplay'}, + 'model_name': {'key': 'modelName', 'type': 'str'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(UsageModel, self).__init__(**kwargs) + self.display = kwargs.get('display', None) + self.model_name = kwargs.get('model_name', None) + self.target_type = kwargs.get('target_type', None) + + +class UsageModelDisplay(msrest.serialization.Model): + """Localized information describing this usage model. + + :param description: String to display for this usage model. + :type description: str + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(UsageModelDisplay, self).__init__(**kwargs) + self.description = kwargs.get('description', None) + + +class UsageModelsResult(msrest.serialization.Model): + """A list of Cache usage models. + + :param next_link: The URI to fetch the next page of Cache usage models. + :type next_link: str + :param value: The list of usage models available for the subscription. 
+ :type value: list[~storage_cache_management_client.models.UsageModel] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[UsageModel]'}, + } + + def __init__( + self, + **kwargs + ): + super(UsageModelsResult, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + self.value = kwargs.get('value', None) diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models_py3.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models_py3.py new file mode 100644 index 00000000000..262be8e3eb4 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models_py3.py @@ -0,0 +1,1391 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import Dict, List, Optional, Union + +import msrest.serialization + +from ._storage_cache_management_client_enums import * + + +class ApiOperation(msrest.serialization.Model): + """REST API operation description: see https://github.com/Azure/azure-rest-api-specs/blob/master/documentation/openapi-authoring-automated-guidelines.md#r3023-operationsapiimplementation. + + :param display: The object that represents the operation. + :type display: ~storage_cache_management_client.models.ApiOperationDisplay + :param origin: Origin of the operation. + :type origin: str + :param is_data_action: The flag that indicates whether the operation applies to data plane. 
+ :type is_data_action: bool + :param name: Operation name: {provider}/{resource}/{operation}. + :type name: str + :param service_specification: Specification of the all the metrics provided for a resource + type. + :type service_specification: + ~storage_cache_management_client.models.ApiOperationPropertiesServiceSpecification + """ + + _attribute_map = { + 'display': {'key': 'display', 'type': 'ApiOperationDisplay'}, + 'origin': {'key': 'origin', 'type': 'str'}, + 'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, + 'name': {'key': 'name', 'type': 'str'}, + 'service_specification': {'key': 'properties.serviceSpecification', 'type': 'ApiOperationPropertiesServiceSpecification'}, + } + + def __init__( + self, + *, + display: Optional["ApiOperationDisplay"] = None, + origin: Optional[str] = None, + is_data_action: Optional[bool] = None, + name: Optional[str] = None, + service_specification: Optional["ApiOperationPropertiesServiceSpecification"] = None, + **kwargs + ): + super(ApiOperation, self).__init__(**kwargs) + self.display = display + self.origin = origin + self.is_data_action = is_data_action + self.name = name + self.service_specification = service_specification + + +class ApiOperationDisplay(msrest.serialization.Model): + """The object that represents the operation. + + :param operation: Operation type: Read, write, delete, etc. + :type operation: str + :param provider: Service provider: Microsoft.StorageCache. + :type provider: str + :param resource: Resource on which the operation is performed: Cache, etc. + :type resource: str + :param description: The description of the operation. 
+ :type description: str + """ + + _attribute_map = { + 'operation': {'key': 'operation', 'type': 'str'}, + 'provider': {'key': 'provider', 'type': 'str'}, + 'resource': {'key': 'resource', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + } + + def __init__( + self, + *, + operation: Optional[str] = None, + provider: Optional[str] = None, + resource: Optional[str] = None, + description: Optional[str] = None, + **kwargs + ): + super(ApiOperationDisplay, self).__init__(**kwargs) + self.operation = operation + self.provider = provider + self.resource = resource + self.description = description + + +class ApiOperationListResult(msrest.serialization.Model): + """Result of the request to list Resource Provider operations. It contains a list of operations and a URL link to get the next set of results. + + :param next_link: URL to get the next set of operation list results if there are any. + :type next_link: str + :param value: List of Resource Provider operations supported by the Microsoft.StorageCache + resource provider. + :type value: list[~storage_cache_management_client.models.ApiOperation] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[ApiOperation]'}, + } + + def __init__( + self, + *, + next_link: Optional[str] = None, + value: Optional[List["ApiOperation"]] = None, + **kwargs + ): + super(ApiOperationListResult, self).__init__(**kwargs) + self.next_link = next_link + self.value = value + + +class ApiOperationPropertiesServiceSpecification(msrest.serialization.Model): + """Specification of the all the metrics provided for a resource type. + + :param metric_specifications: Details about operations related to metrics. 
+ :type metric_specifications: list[~storage_cache_management_client.models.MetricSpecification] + """ + + _attribute_map = { + 'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'}, + } + + def __init__( + self, + *, + metric_specifications: Optional[List["MetricSpecification"]] = None, + **kwargs + ): + super(ApiOperationPropertiesServiceSpecification, self).__init__(**kwargs) + self.metric_specifications = metric_specifications + + +class ASCOperation(msrest.serialization.Model): + """The status of operation. + + :param id: The operation Id. + :type id: str + :param name: The operation name. + :type name: str + :param start_time: The start time of the operation. + :type start_time: str + :param end_time: The end time of the operation. + :type end_time: str + :param status: The status of the operation. + :type status: str + :param error: The error detail of the operation if any. + :type error: ~storage_cache_management_client.models.ErrorResponse + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'str'}, + 'end_time': {'key': 'endTime', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'ErrorResponse'}, + } + + def __init__( + self, + *, + id: Optional[str] = None, + name: Optional[str] = None, + start_time: Optional[str] = None, + end_time: Optional[str] = None, + status: Optional[str] = None, + error: Optional["ErrorResponse"] = None, + **kwargs + ): + super(ASCOperation, self).__init__(**kwargs) + self.id = id + self.name = name + self.start_time = start_time + self.end_time = end_time + self.status = status + self.error = error + + +class Cache(msrest.serialization.Model): + """A Cache instance. Follows Azure Resource Manager standards: https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/resource-api-reference.md. 
+ + Variables are only populated by the server, and will be ignored when sending a request. + + :param tags: A set of tags. ARM tags as name/value pairs. + :type tags: object + :ivar id: Resource ID of the Cache. + :vartype id: str + :param location: Region name string. + :type location: str + :ivar name: Name of Cache. + :vartype name: str + :ivar type: Type of the Cache; Microsoft.StorageCache/Cache. + :vartype type: str + :ivar system_data: The system meta data relating to this resource. + :vartype system_data: ~storage_cache_management_client.models.SystemData + :param cache_size_gb: The size of this Cache, in GB. + :type cache_size_gb: int + :ivar health: Health of the Cache. + :vartype health: ~storage_cache_management_client.models.CacheHealth + :ivar mount_addresses: Array of IP addresses that can be used by clients mounting this Cache. + :vartype mount_addresses: list[str] + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. + :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param root_squash: root squash of cache property. + :type root_squash: bool + :param key_encryption_key: Specifies the location of the key encryption key in Key Vault. + :type key_encryption_key: ~storage_cache_management_client.models.KeyVaultKeyReference + :param mtu: The IPv4 maximum transmission unit configured for the subnet. + :type mtu: int + :ivar utility_addresses: Array of additional IP addresses used by this Cache. + :vartype utility_addresses: list[str] + :param name_sku_name: SKU name for this Cache. 
+ :type name_sku_name: str + :ivar principal_id: The principal id of the cache. + :vartype principal_id: str + :ivar tenant_id: The tenant id associated with the cache. + :vartype tenant_id: str + :param type_identity_type: The type of identity used for the cache. Possible values include: + "SystemAssigned", "None". + :type type_identity_type: str or ~storage_cache_management_client.models.CacheIdentityType + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'type': {'readonly': True}, + 'system_data': {'readonly': True}, + 'health': {'readonly': True}, + 'mount_addresses': {'readonly': True}, + 'mtu': {'maximum': 1500, 'minimum': 576}, + 'utility_addresses': {'readonly': True}, + 'principal_id': {'readonly': True}, + 'tenant_id': {'readonly': True}, + } + + _attribute_map = { + 'tags': {'key': 'tags', 'type': 'object'}, + 'id': {'key': 'id', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, + 'cache_size_gb': {'key': 'properties.cacheSizeGB', 'type': 'int'}, + 'health': {'key': 'properties.health', 'type': 'CacheHealth'}, + 'mount_addresses': {'key': 'properties.mountAddresses', 'type': '[str]'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'subnet': {'key': 'properties.subnet', 'type': 'str'}, + 'upgrade_status': {'key': 'properties.upgradeStatus', 'type': 'CacheUpgradeStatus'}, + 'root_squash': {'key': 'securitySettings.rootSquash', 'type': 'bool'}, + 'key_encryption_key': {'key': 'encryptionSettings.keyEncryptionKey', 'type': 'KeyVaultKeyReference'}, + 'mtu': {'key': 'networkSettings.mtu', 'type': 'int'}, + 'utility_addresses': {'key': 'networkSettings.utilityAddresses', 'type': '[str]'}, + 'name_sku_name': {'key': 'sku.name', 'type': 'str'}, + 'principal_id': {'key': 'identity.principalId', 
'type': 'str'}, + 'tenant_id': {'key': 'identity.tenantId', 'type': 'str'}, + 'type_identity_type': {'key': 'identity.type', 'type': 'str'}, + } + + def __init__( + self, + *, + tags: Optional[object] = None, + location: Optional[str] = None, + cache_size_gb: Optional[int] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + subnet: Optional[str] = None, + upgrade_status: Optional["CacheUpgradeStatus"] = None, + root_squash: Optional[bool] = None, + key_encryption_key: Optional["KeyVaultKeyReference"] = None, + mtu: Optional[int] = 1500, + name_sku_name: Optional[str] = None, + type_identity_type: Optional[Union[str, "CacheIdentityType"]] = None, + **kwargs + ): + super(Cache, self).__init__(**kwargs) + self.tags = tags + self.id = None + self.location = location + self.name = None + self.type = None + self.system_data = None + self.cache_size_gb = cache_size_gb + self.health = None + self.mount_addresses = None + self.provisioning_state = provisioning_state + self.subnet = subnet + self.upgrade_status = upgrade_status + self.root_squash = root_squash + self.key_encryption_key = key_encryption_key + self.mtu = mtu + self.utility_addresses = None + self.name_sku_name = name_sku_name + self.principal_id = None + self.tenant_id = None + self.type_identity_type = type_identity_type + + +class CacheHealth(msrest.serialization.Model): + """An indication of Cache health. Gives more information about health than just that related to provisioning. + + :param state: List of Cache health states. Possible values include: "Unknown", "Healthy", + "Degraded", "Down", "Transitioning", "Stopping", "Stopped", "Upgrading", "Flushing". + :type state: str or ~storage_cache_management_client.models.HealthStateType + :param status_description: Describes explanation of state. 
+ :type status_description: str + """ + + _attribute_map = { + 'state': {'key': 'state', 'type': 'str'}, + 'status_description': {'key': 'statusDescription', 'type': 'str'}, + } + + def __init__( + self, + *, + state: Optional[Union[str, "HealthStateType"]] = None, + status_description: Optional[str] = None, + **kwargs + ): + super(CacheHealth, self).__init__(**kwargs) + self.state = state + self.status_description = status_description + + +class CachesListResult(msrest.serialization.Model): + """Result of the request to list Caches. It contains a list of Caches and a URL link to get the next set of results. + + :param next_link: URL to get the next set of Cache list results, if there are any. + :type next_link: str + :param value: List of Caches. + :type value: list[~storage_cache_management_client.models.Cache] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[Cache]'}, + } + + def __init__( + self, + *, + next_link: Optional[str] = None, + value: Optional[List["Cache"]] = None, + **kwargs + ): + super(CachesListResult, self).__init__(**kwargs) + self.next_link = next_link + self.value = value + + +class CacheUpgradeStatus(msrest.serialization.Model): + """Properties describing the software upgrade state of the Cache. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar current_firmware_version: Version string of the firmware currently installed on this + Cache. + :vartype current_firmware_version: str + :ivar firmware_update_status: True if there is a firmware update ready to install on this + Cache. The firmware will automatically be installed after firmwareUpdateDeadline if not + triggered earlier via the upgrade operation. Possible values include: "available", + "unavailable". 
+ :vartype firmware_update_status: str or + ~storage_cache_management_client.models.FirmwareStatusType + :ivar firmware_update_deadline: Time at which the pending firmware update will automatically be + installed on the Cache. + :vartype firmware_update_deadline: ~datetime.datetime + :ivar last_firmware_update: Time of the last successful firmware update. + :vartype last_firmware_update: ~datetime.datetime + :ivar pending_firmware_version: When firmwareUpdateAvailable is true, this field holds the + version string for the update. + :vartype pending_firmware_version: str + """ + + _validation = { + 'current_firmware_version': {'readonly': True}, + 'firmware_update_status': {'readonly': True}, + 'firmware_update_deadline': {'readonly': True}, + 'last_firmware_update': {'readonly': True}, + 'pending_firmware_version': {'readonly': True}, + } + + _attribute_map = { + 'current_firmware_version': {'key': 'currentFirmwareVersion', 'type': 'str'}, + 'firmware_update_status': {'key': 'firmwareUpdateStatus', 'type': 'str'}, + 'firmware_update_deadline': {'key': 'firmwareUpdateDeadline', 'type': 'iso-8601'}, + 'last_firmware_update': {'key': 'lastFirmwareUpdate', 'type': 'iso-8601'}, + 'pending_firmware_version': {'key': 'pendingFirmwareVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(CacheUpgradeStatus, self).__init__(**kwargs) + self.current_firmware_version = None + self.firmware_update_status = None + self.firmware_update_deadline = None + self.last_firmware_update = None + self.pending_firmware_version = None + + +class ClfsTarget(msrest.serialization.Model): + """Properties pertained to ClfsTarget. + + :param target: Resource ID of storage container. 
+ :type target: str + """ + + _attribute_map = { + 'target': {'key': 'target', 'type': 'str'}, + } + + def __init__( + self, + *, + target: Optional[str] = None, + **kwargs + ): + super(ClfsTarget, self).__init__(**kwargs) + self.target = target + + +class StorageTargetProperties(msrest.serialization.Model): + """Properties of the Storage Target. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ClfsTargetProperties, Nfs3TargetProperties, UnknownTargetProperties. + + All required parameters must be populated in order to send to Azure. + + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Required. Type of the Storage Target.Constant filled by server. Possible + values include: "nfs3", "clfs", "unknown". + :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. 
+ :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_type': {'required': True}, + } + + _attribute_map = { + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + _subtype_map = { + 'target_type': {'clfs': 'ClfsTargetProperties', 'nfs3': 'Nfs3TargetProperties', 'unknown': 'UnknownTargetProperties'} + } + + def __init__( + self, + *, + junctions: Optional[List["NamespaceJunction"]] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + nfs3: Optional["Nfs3Target"] = None, + clfs: Optional["ClfsTarget"] = None, + unknown: Optional["UnknownTarget"] = None, + **kwargs + ): + super(StorageTargetProperties, self).__init__(**kwargs) + self.junctions = junctions + self.target_type = None # type: Optional[str] + self.provisioning_state = provisioning_state + self.nfs3 = nfs3 + self.clfs = clfs + self.unknown = unknown + + +class ClfsTargetProperties(StorageTargetProperties): + """Storage container for use as a CLFS Storage Target. + + All required parameters must be populated in order to send to Azure. + + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Required. Type of the Storage Target.Constant filled by server. Possible + values include: "nfs3", "clfs", "unknown". + :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. 
Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_type': {'required': True}, + } + + _attribute_map = { + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + *, + junctions: Optional[List["NamespaceJunction"]] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + nfs3: Optional["Nfs3Target"] = None, + clfs: Optional["ClfsTarget"] = None, + unknown: Optional["UnknownTarget"] = None, + **kwargs + ): + super(ClfsTargetProperties, self).__init__(junctions=junctions, provisioning_state=provisioning_state, nfs3=nfs3, clfs=clfs, unknown=unknown, **kwargs) + self.target_type = 'clfs' # type: str + + +class CloudErrorBody(msrest.serialization.Model): + """An error response. + + :param code: An identifier for the error. Codes are invariant and are intended to be consumed + programmatically. + :type code: str + :param details: A list of additional details about the error. + :type details: list[~storage_cache_management_client.models.CloudErrorBody] + :param message: A message describing the error, intended to be suitable for display in a user + interface. 
+ :type message: str + :param target: The target of the particular error. For example, the name of the property in + error. + :type target: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[CloudErrorBody]'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + } + + def __init__( + self, + *, + code: Optional[str] = None, + details: Optional[List["CloudErrorBody"]] = None, + message: Optional[str] = None, + target: Optional[str] = None, + **kwargs + ): + super(CloudErrorBody, self).__init__(**kwargs) + self.code = code + self.details = details + self.message = message + self.target = target + + +class ErrorResponse(msrest.serialization.Model): + """Describes the format of Error response. + + :param code: Error code. + :type code: str + :param message: Error message indicating why the operation failed. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__( + self, + *, + code: Optional[str] = None, + message: Optional[str] = None, + **kwargs + ): + super(ErrorResponse, self).__init__(**kwargs) + self.code = code + self.message = message + + +class KeyVaultKeyReference(msrest.serialization.Model): + """Describes a reference to Key Vault Key. + + All required parameters must be populated in order to send to Azure. + + :param key_url: Required. The URL referencing a key encryption key in Key Vault. + :type key_url: str + :param source_vault: Required. Describes a resource Id to source Key Vault. 
+ :type source_vault: ~storage_cache_management_client.models.KeyVaultKeyReferenceSourceVault + """ + + _validation = { + 'key_url': {'required': True}, + 'source_vault': {'required': True}, + } + + _attribute_map = { + 'key_url': {'key': 'keyUrl', 'type': 'str'}, + 'source_vault': {'key': 'sourceVault', 'type': 'KeyVaultKeyReferenceSourceVault'}, + } + + def __init__( + self, + *, + key_url: str, + source_vault: "KeyVaultKeyReferenceSourceVault", + **kwargs + ): + super(KeyVaultKeyReference, self).__init__(**kwargs) + self.key_url = key_url + self.source_vault = source_vault + + +class KeyVaultKeyReferenceSourceVault(msrest.serialization.Model): + """Describes a resource Id to source Key Vault. + + :param id: Resource Id. + :type id: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__( + self, + *, + id: Optional[str] = None, + **kwargs + ): + super(KeyVaultKeyReferenceSourceVault, self).__init__(**kwargs) + self.id = id + + +class MetricDimension(msrest.serialization.Model): + """Specifications of the Dimension of metrics. + + :param name: Name of the dimension. + :type name: str + :param display_name: Localized friendly display name of the dimension. + :type display_name: str + :param internal_name: Internal name of the dimension. + :type internal_name: str + :param to_be_exported_for_shoebox: To be exported to shoe box. 
+ :type to_be_exported_for_shoebox: bool + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'internal_name': {'key': 'internalName', 'type': 'str'}, + 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + display_name: Optional[str] = None, + internal_name: Optional[str] = None, + to_be_exported_for_shoebox: Optional[bool] = None, + **kwargs + ): + super(MetricDimension, self).__init__(**kwargs) + self.name = name + self.display_name = display_name + self.internal_name = internal_name + self.to_be_exported_for_shoebox = to_be_exported_for_shoebox + + +class MetricSpecification(msrest.serialization.Model): + """Details about operation related to metrics. + + :param name: The name of the metric. + :type name: str + :param display_name: Localized display name of the metric. + :type display_name: str + :param display_description: The description of the metric. + :type display_description: str + :param unit: The unit that the metric is measured in. + :type unit: str + :param aggregation_type: The type of metric aggregation. + :type aggregation_type: str + :param supported_aggregation_types: Support metric aggregation type. + :type supported_aggregation_types: list[str or + ~storage_cache_management_client.models.MetricAggregationType] + :param metric_class: Type of metrics. + :type metric_class: str + :param dimensions: Dimensions of the metric. 
+ :type dimensions: list[~storage_cache_management_client.models.MetricDimension] + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'display_description': {'key': 'displayDescription', 'type': 'str'}, + 'unit': {'key': 'unit', 'type': 'str'}, + 'aggregation_type': {'key': 'aggregationType', 'type': 'str'}, + 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'}, + 'metric_class': {'key': 'metricClass', 'type': 'str'}, + 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + display_name: Optional[str] = None, + display_description: Optional[str] = None, + unit: Optional[str] = None, + aggregation_type: Optional[str] = None, + supported_aggregation_types: Optional[List[Union[str, "MetricAggregationType"]]] = None, + metric_class: Optional[str] = None, + dimensions: Optional[List["MetricDimension"]] = None, + **kwargs + ): + super(MetricSpecification, self).__init__(**kwargs) + self.name = name + self.display_name = display_name + self.display_description = display_description + self.unit = unit + self.aggregation_type = aggregation_type + self.supported_aggregation_types = supported_aggregation_types + self.metric_class = metric_class + self.dimensions = dimensions + + +class NamespaceJunction(msrest.serialization.Model): + """A namespace junction. + + :param namespace_path: Namespace path on a Cache for a Storage Target. + :type namespace_path: str + :param target_path: Path in Storage Target to which namespacePath points. + :type target_path: str + :param nfs_export: NFS export where targetPath exists. 
+ :type nfs_export: str + """ + + _attribute_map = { + 'namespace_path': {'key': 'namespacePath', 'type': 'str'}, + 'target_path': {'key': 'targetPath', 'type': 'str'}, + 'nfs_export': {'key': 'nfsExport', 'type': 'str'}, + } + + def __init__( + self, + *, + namespace_path: Optional[str] = None, + target_path: Optional[str] = None, + nfs_export: Optional[str] = None, + **kwargs + ): + super(NamespaceJunction, self).__init__(**kwargs) + self.namespace_path = namespace_path + self.target_path = target_path + self.nfs_export = nfs_export + + +class Nfs3Target(msrest.serialization.Model): + """Properties pertained to Nfs3Target. + + :param target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44). + :type target: str + :param usage_model: Identifies the primary usage model to be used for this Storage Target. Get + choices from .../usageModels. + :type usage_model: str + """ + + _validation = { + 'target': {'pattern': r'^[-.0-9a-zA-Z]+$'}, + } + + _attribute_map = { + 'target': {'key': 'target', 'type': 'str'}, + 'usage_model': {'key': 'usageModel', 'type': 'str'}, + } + + def __init__( + self, + *, + target: Optional[str] = None, + usage_model: Optional[str] = None, + **kwargs + ): + super(Nfs3Target, self).__init__(**kwargs) + self.target = target + self.usage_model = usage_model + + +class Nfs3TargetProperties(StorageTargetProperties): + """An NFSv3 mount point for use as a Storage Target. + + All required parameters must be populated in order to send to Azure. + + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Required. Type of the Storage Target.Constant filled by server. Possible + values include: "nfs3", "clfs", "unknown". 
+ :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_type': {'required': True}, + } + + _attribute_map = { + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + *, + junctions: Optional[List["NamespaceJunction"]] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + nfs3: Optional["Nfs3Target"] = None, + clfs: Optional["ClfsTarget"] = None, + unknown: Optional["UnknownTarget"] = None, + **kwargs + ): + super(Nfs3TargetProperties, self).__init__(junctions=junctions, provisioning_state=provisioning_state, nfs3=nfs3, clfs=clfs, unknown=unknown, **kwargs) + self.target_type = 'nfs3' # type: str + + +class ResourceSku(msrest.serialization.Model): + """A resource SKU. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar resource_type: The type of resource the SKU applies to. 
+ :vartype resource_type: str + :param capabilities: A list of capabilities of this SKU, such as throughput or ops/sec. + :type capabilities: list[~storage_cache_management_client.models.ResourceSkuCapabilities] + :ivar locations: The set of locations that the SKU is available. This will be supported and + registered Azure Geo Regions (e.g., West US, East US, Southeast Asia, etc.). + :vartype locations: list[str] + :param location_info: The set of locations that the SKU is available. + :type location_info: list[~storage_cache_management_client.models.ResourceSkuLocationInfo] + :param name: The name of this SKU. + :type name: str + :param restrictions: The restrictions preventing this SKU from being used. This is empty if + there are no restrictions. + :type restrictions: list[~storage_cache_management_client.models.Restriction] + """ + + _validation = { + 'resource_type': {'readonly': True}, + 'locations': {'readonly': True}, + } + + _attribute_map = { + 'resource_type': {'key': 'resourceType', 'type': 'str'}, + 'capabilities': {'key': 'capabilities', 'type': '[ResourceSkuCapabilities]'}, + 'locations': {'key': 'locations', 'type': '[str]'}, + 'location_info': {'key': 'locationInfo', 'type': '[ResourceSkuLocationInfo]'}, + 'name': {'key': 'name', 'type': 'str'}, + 'restrictions': {'key': 'restrictions', 'type': '[Restriction]'}, + } + + def __init__( + self, + *, + capabilities: Optional[List["ResourceSkuCapabilities"]] = None, + location_info: Optional[List["ResourceSkuLocationInfo"]] = None, + name: Optional[str] = None, + restrictions: Optional[List["Restriction"]] = None, + **kwargs + ): + super(ResourceSku, self).__init__(**kwargs) + self.resource_type = None + self.capabilities = capabilities + self.locations = None + self.location_info = location_info + self.name = name + self.restrictions = restrictions + + +class ResourceSkuCapabilities(msrest.serialization.Model): + """A resource SKU capability. + + :param name: Name of a capability, such as ops/sec. 
+ :type name: str + :param value: Quantity, if the capability is measured by quantity. + :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + value: Optional[str] = None, + **kwargs + ): + super(ResourceSkuCapabilities, self).__init__(**kwargs) + self.name = name + self.value = value + + +class ResourceSkuLocationInfo(msrest.serialization.Model): + """Resource SKU location information. + + :param location: Location where this SKU is available. + :type location: str + :param zones: Zones if any. + :type zones: list[str] + """ + + _attribute_map = { + 'location': {'key': 'location', 'type': 'str'}, + 'zones': {'key': 'zones', 'type': '[str]'}, + } + + def __init__( + self, + *, + location: Optional[str] = None, + zones: Optional[List[str]] = None, + **kwargs + ): + super(ResourceSkuLocationInfo, self).__init__(**kwargs) + self.location = location + self.zones = zones + + +class ResourceSkusResult(msrest.serialization.Model): + """The response from the List Cache SKUs operation. + + Variables are only populated by the server, and will be ignored when sending a request. + + :param next_link: The URI to fetch the next page of Cache SKUs. + :type next_link: str + :ivar value: The list of SKUs available for the subscription. + :vartype value: list[~storage_cache_management_client.models.ResourceSku] + """ + + _validation = { + 'value': {'readonly': True}, + } + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[ResourceSku]'}, + } + + def __init__( + self, + *, + next_link: Optional[str] = None, + **kwargs + ): + super(ResourceSkusResult, self).__init__(**kwargs) + self.next_link = next_link + self.value = None + + +class Restriction(msrest.serialization.Model): + """The restrictions preventing this SKU from being used. 
+ + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar type: The type of restrictions. In this version, the only possible value for this is + location. + :vartype type: str + :ivar values: The value of restrictions. If the restriction type is set to location, then this + would be the different locations where the SKU is restricted. + :vartype values: list[str] + :param reason_code: The reason for the restriction. As of now this can be "QuotaId" or + "NotAvailableForSubscription". "QuotaId" is set when the SKU has requiredQuotas parameter as + the subscription does not belong to that quota. "NotAvailableForSubscription" is related to + capacity at the datacenter. Possible values include: "QuotaId", "NotAvailableForSubscription". + :type reason_code: str or ~storage_cache_management_client.models.ReasonCode + """ + + _validation = { + 'type': {'readonly': True}, + 'values': {'readonly': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[str]'}, + 'reason_code': {'key': 'reasonCode', 'type': 'str'}, + } + + def __init__( + self, + *, + reason_code: Optional[Union[str, "ReasonCode"]] = None, + **kwargs + ): + super(Restriction, self).__init__(**kwargs) + self.type = None + self.values = None + self.reason_code = reason_code + + +class StorageTargetResource(msrest.serialization.Model): + """Resource used by a Cache. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: Name of the Storage Target. + :vartype name: str + :ivar id: Resource ID of the Storage Target. + :vartype id: str + :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget. + :vartype type: str + :ivar location: Region name string. + :vartype location: str + :ivar system_data: The system meta data relating to this resource. 
+ :vartype system_data: ~storage_cache_management_client.models.SystemData + """ + + _validation = { + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'id': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'readonly': True}, + 'system_data': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageTargetResource, self).__init__(**kwargs) + self.name = None + self.id = None + self.type = None + self.location = None + self.system_data = None + + +class StorageTarget(StorageTargetResource): + """Type of the Storage Target. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: Name of the Storage Target. + :vartype name: str + :ivar id: Resource ID of the Storage Target. + :vartype id: str + :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget. + :vartype type: str + :ivar location: Region name string. + :vartype location: str + :ivar system_data: The system meta data relating to this resource. + :vartype system_data: ~storage_cache_management_client.models.SystemData + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target.Constant filled by server. Possible values + include: "nfs3", "clfs", "unknown". + :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. 
Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param unknown_map: Dictionary of string->string pairs containing information about the Storage + Target. + :type unknown_map: dict[str, str] + :param target: Resource ID of storage container. + :type target: str + """ + + _validation = { + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'id': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'readonly': True}, + 'system_data': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, + 'junctions': {'key': 'properties.junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'properties.targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'properties.nfs3', 'type': 'Nfs3Target'}, + 'unknown_map': {'key': 'unknown.unknownMap', 'type': '{str}'}, + 'target': {'key': 'clfs.target', 'type': 'str'}, + } + + def __init__( + self, + *, + junctions: Optional[List["NamespaceJunction"]] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + nfs3: Optional["Nfs3Target"] = None, + unknown_map: Optional[Dict[str, str]] = None, + target: Optional[str] = None, + **kwargs + ): + super(StorageTarget, self).__init__(**kwargs) + self.junctions = junctions + self.target_type = None # type: Optional[str] + self.provisioning_state = provisioning_state + self.nfs3 = nfs3 + self.unknown_map = unknown_map + self.target = target + + +class 
StorageTargetsResult(msrest.serialization.Model):
+    """A list of Storage Targets.
+
+    :param next_link: The URI to fetch the next page of Storage Targets.
+    :type next_link: str
+    :param value: The list of Storage Targets defined for the Cache.
+    :type value: list[~storage_cache_management_client.models.StorageTarget]
+    """
+
+    _attribute_map = {
+        'next_link': {'key': 'nextLink', 'type': 'str'},
+        'value': {'key': 'value', 'type': '[StorageTarget]'},
+    }
+
+    def __init__(
+        self,
+        *,
+        next_link: Optional[str] = None,
+        value: Optional[List["StorageTarget"]] = None,
+        **kwargs
+    ):
+        super(StorageTargetsResult, self).__init__(**kwargs)
+        self.next_link = next_link
+        self.value = value
+
+
+class SystemData(msrest.serialization.Model):
+    """Metadata pertaining to creation and last modification of the resource.
+
+    :param created_by: The identity that created the resource.
+    :type created_by: str
+    :param created_by_type: The type of identity that created the resource. Possible values
+     include: "User", "Application", "ManagedIdentity", "Key".
+    :type created_by_type: str or ~storage_cache_management_client.models.CreatedByType
+    :param created_at: The timestamp of resource creation (UTC).
+    :type created_at: ~datetime.datetime
+    :param last_modified_by: The identity that last modified the resource.
+    :type last_modified_by: str
+    :param last_modified_by_type: The type of identity that last modified the resource. Possible
+     values include: "User", "Application", "ManagedIdentity", "Key".
+    :type last_modified_by_type: str or ~storage_cache_management_client.models.CreatedByType
+    :param last_modified_at: The timestamp of resource last modification (UTC).
+ :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_by': {'key': 'createdBy', 'type': 'str'}, + 'created_by_type': {'key': 'createdByType', 'type': 'str'}, + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, + 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + *, + created_by: Optional[str] = None, + created_by_type: Optional[Union[str, "CreatedByType"]] = None, + created_at: Optional[datetime.datetime] = None, + last_modified_by: Optional[str] = None, + last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None, + last_modified_at: Optional[datetime.datetime] = None, + **kwargs + ): + super(SystemData, self).__init__(**kwargs) + self.created_by = created_by + self.created_by_type = created_by_type + self.created_at = created_at + self.last_modified_by = last_modified_by + self.last_modified_by_type = last_modified_by_type + self.last_modified_at = last_modified_at + + +class UnknownTarget(msrest.serialization.Model): + """Properties pertained to UnknownTarget. + + :param unknown_map: Dictionary of string->string pairs containing information about the Storage + Target. + :type unknown_map: dict[str, str] + """ + + _attribute_map = { + 'unknown_map': {'key': 'unknownMap', 'type': '{str}'}, + } + + def __init__( + self, + *, + unknown_map: Optional[Dict[str, str]] = None, + **kwargs + ): + super(UnknownTarget, self).__init__(**kwargs) + self.unknown_map = unknown_map + + +class UnknownTargetProperties(StorageTargetProperties): + """Storage container for use as an Unknown Storage Target. + + All required parameters must be populated in order to send to Azure. + + :param junctions: List of Cache namespace junctions to target for namespace associations. 
+ :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Required. Type of the Storage Target.Constant filled by server. Possible + values include: "nfs3", "clfs", "unknown". + :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_type': {'required': True}, + } + + _attribute_map = { + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + *, + junctions: Optional[List["NamespaceJunction"]] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + nfs3: Optional["Nfs3Target"] = None, + clfs: Optional["ClfsTarget"] = None, + unknown: Optional["UnknownTarget"] = None, + **kwargs + ): + super(UnknownTargetProperties, self).__init__(junctions=junctions, provisioning_state=provisioning_state, nfs3=nfs3, clfs=clfs, unknown=unknown, **kwargs) + self.target_type = 'unknown' # type: str + + +class 
UsageModel(msrest.serialization.Model): + """A usage model. + + :param display: Localized information describing this usage model. + :type display: ~storage_cache_management_client.models.UsageModelDisplay + :param model_name: Non-localized keyword name for this usage model. + :type model_name: str + :param target_type: The type of Storage Target to which this model is applicable (only nfs3 as + of this version). + :type target_type: str + """ + + _attribute_map = { + 'display': {'key': 'display', 'type': 'UsageModelDisplay'}, + 'model_name': {'key': 'modelName', 'type': 'str'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + } + + def __init__( + self, + *, + display: Optional["UsageModelDisplay"] = None, + model_name: Optional[str] = None, + target_type: Optional[str] = None, + **kwargs + ): + super(UsageModel, self).__init__(**kwargs) + self.display = display + self.model_name = model_name + self.target_type = target_type + + +class UsageModelDisplay(msrest.serialization.Model): + """Localized information describing this usage model. + + :param description: String to display for this usage model. + :type description: str + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + **kwargs + ): + super(UsageModelDisplay, self).__init__(**kwargs) + self.description = description + + +class UsageModelsResult(msrest.serialization.Model): + """A list of Cache usage models. + + :param next_link: The URI to fetch the next page of Cache usage models. + :type next_link: str + :param value: The list of usage models available for the subscription. 
+ :type value: list[~storage_cache_management_client.models.UsageModel] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[UsageModel]'}, + } + + def __init__( + self, + *, + next_link: Optional[str] = None, + value: Optional[List["UsageModel"]] = None, + **kwargs + ): + super(UsageModelsResult, self).__init__(**kwargs) + self.next_link = next_link + self.value = value diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_storage_cache_management_client_enums.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_storage_cache_management_client_enums.py new file mode 100644 index 00000000000..e65593ffd74 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_storage_cache_management_client_enums.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum, EnumMeta +from six import with_metaclass + +class _CaseInsensitiveEnumMeta(EnumMeta): + def __getitem__(self, name): + return super().__getitem__(name.upper()) + + def __getattr__(cls, name): + """Return the enum member matching `name` + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. 
+ """ + try: + return cls._member_map_[name.upper()] + except KeyError: + raise AttributeError(name) + + +class CacheIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of identity used for the cache + """ + + SYSTEM_ASSIGNED = "SystemAssigned" + NONE = "None" + +class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of identity that created the resource. + """ + + USER = "User" + APPLICATION = "Application" + MANAGED_IDENTITY = "ManagedIdentity" + KEY = "Key" + +class FirmwareStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """True if there is a firmware update ready to install on this Cache. The firmware will + automatically be installed after firmwareUpdateDeadline if not triggered earlier via the + upgrade operation. + """ + + AVAILABLE = "available" + UNAVAILABLE = "unavailable" + +class HealthStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """List of Cache health states. + """ + + UNKNOWN = "Unknown" + HEALTHY = "Healthy" + DEGRADED = "Degraded" + DOWN = "Down" + TRANSITIONING = "Transitioning" + STOPPING = "Stopping" + STOPPED = "Stopped" + UPGRADING = "Upgrading" + FLUSHING = "Flushing" + +class MetricAggregationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + NOT_SPECIFIED = "NotSpecified" + NONE = "None" + AVERAGE = "Average" + MINIMUM = "Minimum" + MAXIMUM = "Maximum" + TOTAL = "Total" + COUNT = "Count" + +class ProvisioningStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """ARM provisioning state, see https://github.com/Azure/azure-resource-manager- + rpc/blob/master/v1.0/Addendum.md#provisioningstate-property + """ + + SUCCEEDED = "Succeeded" + FAILED = "Failed" + CANCELLED = "Cancelled" + CREATING = "Creating" + DELETING = "Deleting" + UPDATING = "Updating" + +class ReasonCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The reason for the restriction. 
As of now this can be "QuotaId" or + "NotAvailableForSubscription". "QuotaId" is set when the SKU has requiredQuotas parameter as + the subscription does not belong to that quota. "NotAvailableForSubscription" is related to + capacity at the datacenter. + """ + + QUOTA_ID = "QuotaId" + NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription" + +class StorageTargetType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Type of the Storage Target. + """ + + NFS3 = "nfs3" + CLFS = "clfs" + UNKNOWN = "unknown" diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/__init__.py new file mode 100644 index 00000000000..52d521bf575 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._operation_operations import OperationOperations +from ._sku_operations import SkuOperations +from ._usage_model_operations import UsageModelOperations +from ._asc_operation_operations import ASCOperationOperations +from ._cache_operations import CacheOperations +from ._storage_target_operations import StorageTargetOperations + +__all__ = [ + 'OperationOperations', + 'SkuOperations', + 'UsageModelOperations', + 'ASCOperationOperations', + 'CacheOperations', + 'StorageTargetOperations', +] diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_asc_operation_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_asc_operation_operations.py new file mode 100644 index 00000000000..4ecb56fd8cc --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_asc_operation_operations.py @@ -0,0 +1,104 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. 
import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class ASCOperationOperations(object): + """ASCOperationOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def get( + self, + location, # type: str + operation_id, # type: str + **kwargs # type: Any + ): + # type: (...) -> "models.ASCOperation" + """Gets the status of an asynchronous operation for the Azure HPC cache. + + :param location: The region name which the operation will lookup into. + :type location: str + :param operation_id: The operation id which uniquely identifies the asynchronous operation. 
+ :type operation_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ASCOperation, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.ASCOperation + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ASCOperation"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'location': self._serialize.url("location", location, 'str'), + 'operationId': self._serialize.url("operation_id", operation_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('ASCOperation', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': 
'/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/locations/{location}/ascOperations/{operationId}'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_cache_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_cache_operations.py new file mode 100644 index 00000000000..fac18b68a99 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_cache_operations.py @@ -0,0 +1,1162 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class CacheOperations(object): + """CacheOperations operations. + + You should not instantiate this class directly. 
Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs # type: Any + ): + # type: (...) -> Iterable["models.CachesListResult"] + """Returns all Caches the user has access to under a subscription. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either CachesListResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.CachesListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CachesListResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, 
Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('CachesListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/caches'} # type: ignore + + def list_by_resource_group( + self, + resource_group_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> Iterable["models.CachesListResult"] + """Returns all Caches the user has access to under a resource group. + + :param resource_group_name: Target resource group. 
+ :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either CachesListResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.CachesListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CachesListResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list_by_resource_group.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('CachesListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + 
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches'} # type: ignore + + def _delete_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._delete_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + 
+ if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def begin_delete( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller[object] + """Schedules a Cache for deletion. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + + if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def get( + self, + resource_group_name, # type: 
str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "models.Cache" + """Returns a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Cache, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.Cache + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def _create_or_update_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + tags=None, # type: Optional[object] + location=None, # type: Optional[str] + cache_size_gb=None, # type: Optional[int] + provisioning_state=None, # type: Optional[Union[str, "models.ProvisioningStateType"]] + subnet=None, # type: Optional[str] + upgrade_status=None, # type: Optional["models.CacheUpgradeStatus"] + root_squash=None, # type: Optional[bool] + key_encryption_key=None, # type: Optional["models.KeyVaultKeyReference"] + mtu=1500, # type: Optional[int] + name=None, # type: Optional[str] + type=None, # type: Optional[Union[str, "models.CacheIdentityType"]] + **kwargs # type: Any + ): + # type: (...) 
-> "models.Cache" + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + cache = models.Cache(tags=tags, location=location, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, root_squash=root_squash, key_encryption_key=key_encryption_key, mtu=mtu, name_sku_name=name, type_identity_type=type) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self._create_or_update_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if cache is not None: + body_content = self._serialize.body(cache, 'Cache') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if 
response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('Cache', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('Cache', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def begin_create_or_update( + self, + resource_group_name, # type: str + cache_name, # type: str + tags=None, # type: Optional[object] + location=None, # type: Optional[str] + cache_size_gb=None, # type: Optional[int] + provisioning_state=None, # type: Optional[Union[str, "models.ProvisioningStateType"]] + subnet=None, # type: Optional[str] + upgrade_status=None, # type: Optional["models.CacheUpgradeStatus"] + root_squash=None, # type: Optional[bool] + key_encryption_key=None, # type: Optional["models.KeyVaultKeyReference"] + mtu=1500, # type: Optional[int] + name=None, # type: Optional[str] + type=None, # type: Optional[Union[str, "models.CacheIdentityType"]] + **kwargs # type: Any + ): + # type: (...) -> LROPoller["models.Cache"] + """Create or update a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param tags: ARM tags as name/value pairs. + :type tags: object + :param location: Region name string. + :type location: str + :param cache_size_gb: The size of this Cache, in GB. 
+ :type cache_size_gb: int + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. + :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param root_squash: root squash of cache property. + :type root_squash: bool + :param key_encryption_key: Specifies the location of the key encryption key in Key Vault. + :type key_encryption_key: ~storage_cache_management_client.models.KeyVaultKeyReference + :param mtu: The IPv4 maximum transmission unit configured for the subnet. + :type mtu: int + :param name: SKU name for this Cache. + :type name: str + :param type: The type of identity used for the cache. + :type type: str or ~storage_cache_management_client.models.CacheIdentityType + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of LROPoller that returns either Cache or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~storage_cache_management_client.models.Cache] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + tags=tags, + location=location, + cache_size_gb=cache_size_gb, + provisioning_state=provisioning_state, + subnet=subnet, + upgrade_status=upgrade_status, + root_squash=root_squash, + key_encryption_key=key_encryption_key, + mtu=mtu, + name=name, + type=type, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + 
else: + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def update( + self, + resource_group_name, # type: str + cache_name, # type: str + tags=None, # type: Optional[object] + location=None, # type: Optional[str] + cache_size_gb=None, # type: Optional[int] + provisioning_state=None, # type: Optional[Union[str, "models.ProvisioningStateType"]] + subnet=None, # type: Optional[str] + upgrade_status=None, # type: Optional["models.CacheUpgradeStatus"] + root_squash=None, # type: Optional[bool] + key_encryption_key=None, # type: Optional["models.KeyVaultKeyReference"] + mtu=1500, # type: Optional[int] + name=None, # type: Optional[str] + type=None, # type: Optional[Union[str, "models.CacheIdentityType"]] + **kwargs # type: Any + ): + # type: (...) -> "models.Cache" + """Update a Cache instance. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param tags: ARM tags as name/value pairs. + :type tags: object + :param location: Region name string. + :type location: str + :param cache_size_gb: The size of this Cache, in GB. + :type cache_size_gb: int + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. + :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param root_squash: root squash of cache property. 
+ :type root_squash: bool + :param key_encryption_key: Specifies the location of the key encryption key in Key Vault. + :type key_encryption_key: ~storage_cache_management_client.models.KeyVaultKeyReference + :param mtu: The IPv4 maximum transmission unit configured for the subnet. + :type mtu: int + :param name: SKU name for this Cache. + :type name: str + :param type: The type of identity used for the cache. + :type type: str or ~storage_cache_management_client.models.CacheIdentityType + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Cache, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.Cache + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + cache = models.Cache(tags=tags, location=location, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, root_squash=root_squash, key_encryption_key=key_encryption_key, mtu=mtu, name_sku_name=name, type_identity_type=type) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.update.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", 
api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if cache is not None: + body_content = self._serialize.body(cache, 'Cache') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def _flush_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) 
-> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._flush_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _flush_initial.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'} # type: ignore + + def begin_flush( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller[object] + """Tells a Cache to write all dirty data to the Storage Target(s). During the flush, clients will + see errors returned until the flush is complete. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = self._flush_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_flush.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'} # type: 
ignore + + def _start_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._start_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) 
+ + return deserialized + _start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'} # type: ignore + + def begin_start( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller[object] + """Tells a Stopped state Cache to transition to Active state. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = self._start_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'} # type: 
ignore + + def _stop_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._stop_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + 
+ return deserialized + _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'} # type: ignore + + def begin_stop( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller[object] + """Tells an Active Cache to transition to Stopped state. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = self._stop_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'} # type: 
ignore + + def _upgrade_firmware_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._upgrade_firmware_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 201: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return 
cls(pipeline_response, deserialized, {}) + + return deserialized + _upgrade_firmware_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'} # type: ignore + + def begin_upgrade_firmware( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller[object] + """Upgrade a Cache's firmware if a new version is available. Otherwise, this operation has no + effect. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = self._upgrade_firmware_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_upgrade_firmware.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_operation_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_operation_operations.py new file mode 100644 index 00000000000..63a62f1f56a --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_operation_operations.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class OperationOperations(object): + """OperationOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. 
+ + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs # type: Any + ): + # type: (...) -> Iterable["models.ApiOperationListResult"] + """Lists all of the available Resource Provider operations. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either ApiOperationListResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.ApiOperationListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ApiOperationListResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, 
header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('ApiOperationListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/providers/Microsoft.StorageCache/operations'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_sku_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_sku_operations.py new file mode 100644 index 00000000000..cdf298a9b19 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_sku_operations.py @@ -0,0 +1,113 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class SkuOperations(object): + """SkuOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs # type: Any + ): + # type: (...) -> Iterable["models.ResourceSkusResult"] + """Get the list of StorageCache.Cache SKUs available to this subscription. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either ResourceSkusResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.ResourceSkusResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ResourceSkusResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('ResourceSkusResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if 
response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/skus'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_storage_target_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_storage_target_operations.py new file mode 100644 index 00000000000..e4948d4b717 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_storage_target_operations.py @@ -0,0 +1,499 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. 
import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class StorageTargetOperations(object): + """StorageTargetOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list_by_cache( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> Iterable["models.StorageTargetsResult"] + """Returns a list of Storage Targets for the specified Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. 
+ :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either StorageTargetsResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.StorageTargetsResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTargetsResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list_by_cache.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('StorageTargetsResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, 
iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list_by_cache.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets'} # type: ignore + + def _delete_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + storage_target_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self._delete_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + def begin_delete( + self, + resource_group_name, # type: str + cache_name, # type: str + storage_target_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller[object] + """Removes a Storage Target from a Cache. This operation is allowed at any time, but if the Cache + is down or unhealthy, the actual removal of the Storage Target may be delayed until the Cache + is healthy again. Note that if the Cache has data to flush to the Storage Target, the data will + be flushed before the Storage Target will be deleted. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of Storage Target. 
+ :type storage_target_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: 
polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + def get( + self, + resource_group_name, # type: str + cache_name, # type: str + storage_target_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "models.StorageTarget" + """Returns a Storage Target from a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of the Storage Target. Length of name must be not greater than + 80 and chars must be in list of [-0-9a-zA-Z_] char class. 
+ :type storage_target_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageTarget, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.StorageTarget + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: + 
return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + def _create_or_update_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + storage_target_name, # type: str + junctions=None, # type: Optional[List["models.NamespaceJunction"]] + target_type=None, # type: Optional[Union[str, "models.StorageTargetType"]] + provisioning_state=None, # type: Optional[Union[str, "models.ProvisioningStateType"]] + nfs3=None, # type: Optional["models.Nfs3Target"] + unknown_map=None, # type: Optional[Dict[str, str]] + target=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "models.StorageTarget" + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + storagetarget = models.StorageTarget(junctions=junctions, target_type=target_type, provisioning_state=provisioning_state, nfs3=nfs3, unknown_map=unknown_map, target=target) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self._create_or_update_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, 
**path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if storagetarget is not None: + body_content = self._serialize.body(storagetarget, 'StorageTarget') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + def begin_create_or_update( + self, + resource_group_name, # type: str + cache_name, # type: str + storage_target_name, # type: str + junctions=None, # type: Optional[List["models.NamespaceJunction"]] + target_type=None, # type: Optional[Union[str, 
"models.StorageTargetType"]] + provisioning_state=None, # type: Optional[Union[str, "models.ProvisioningStateType"]] + nfs3=None, # type: Optional["models.Nfs3Target"] + unknown_map=None, # type: Optional[Dict[str, str]] + target=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> LROPoller["models.StorageTarget"] + """Create or update a Storage Target. This operation is allowed at any time, but if the Cache is + down or unhealthy, the actual creation/modification of the Storage Target may be delayed until + the Cache is healthy again. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of the Storage Target. Length of name must be not greater than + 80 and chars must be in list of [-0-9a-zA-Z_] char class. + :type storage_target_name: str + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str or ~storage_cache_management_client.models.StorageTargetType + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param unknown_map: Dictionary of string->string pairs containing information about the Storage + Target. + :type unknown_map: dict[str, str] + :param target: Resource ID of storage container. 
+ :type target: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of LROPoller that returns either StorageTarget or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~storage_cache_management_client.models.StorageTarget] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + junctions=junctions, + target_type=target_type, + provisioning_state=provisioning_state, + nfs3=nfs3, + unknown_map=unknown_map, + target=target, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': 
self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + + if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output + ) + else: + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_usage_model_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_usage_model_operations.py new file mode 100644 index 00000000000..7b3f94d7dc5 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_usage_model_operations.py @@ -0,0 +1,113 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class UsageModelOperations(object): + """UsageModelOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs # type: Any + ): + # type: (...) -> Iterable["models.UsageModelsResult"] + """Get the list of Cache Usage Models available to this subscription. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either UsageModelsResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.UsageModelsResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.UsageModelsResult"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('UsageModelsResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if 
response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/usageModels'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/py.typed b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/py.typed new file mode 100644 index 00000000000..e5aff4f83af --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/src/storagecache/report.md b/src/storagecache/report.md new file mode 100644 index 00000000000..191931149bc --- /dev/null +++ b/src/storagecache/report.md @@ -0,0 +1,307 @@ +# Azure CLI Module Creation Report + +## EXTENSION +|CLI Extension|Command Groups| +|---------|------------| +|az storagecache|[groups](#CommandGroups) + +## GROUPS +### Command groups in `az storagecache` extension +|CLI Command Group|Group Swagger name|Commands| +|---------|------------|--------| +|az storagecache sku|Skus|[commands](#CommandsInSkus)| +|az storagecache usage-model|UsageModels|[commands](#CommandsInUsageModels)| +|az storagecache asc-operation|AscOperations|[commands](#CommandsInAscOperations)| +|az storagecache cache|Caches|[commands](#CommandsInCaches)| +|az storagecache storage-target|StorageTargets|[commands](#CommandsInStorageTargets)| + +## COMMANDS +### Commands in `az storagecache asc-operation` group +|CLI Command|Operation Swagger name|Parameters|Examples| +|---------|------------|--------|-----------| +|[az storagecache asc-operation show](#AscOperationsGet)|Get|[Parameters](#ParametersAscOperationsGet)|[Example](#ExamplesAscOperationsGet)| + +### Commands in `az storagecache cache` group +|CLI 
Command|Operation Swagger name|Parameters|Examples| +|---------|------------|--------|-----------| +|[az storagecache cache list](#CachesListByResourceGroup)|ListByResourceGroup|[Parameters](#ParametersCachesListByResourceGroup)|[Example](#ExamplesCachesListByResourceGroup)| +|[az storagecache cache list](#CachesList)|List|[Parameters](#ParametersCachesList)|[Example](#ExamplesCachesList)| +|[az storagecache cache show](#CachesGet)|Get|[Parameters](#ParametersCachesGet)|[Example](#ExamplesCachesGet)| +|[az storagecache cache create](#CachesCreateOrUpdate#Create)|CreateOrUpdate#Create|[Parameters](#ParametersCachesCreateOrUpdate#Create)|[Example](#ExamplesCachesCreateOrUpdate#Create)| +|[az storagecache cache update](#CachesUpdate)|Update|[Parameters](#ParametersCachesUpdate)|[Example](#ExamplesCachesUpdate)| +|[az storagecache cache delete](#CachesDelete)|Delete|[Parameters](#ParametersCachesDelete)|[Example](#ExamplesCachesDelete)| +|[az storagecache cache flush](#CachesFlush)|Flush|[Parameters](#ParametersCachesFlush)|[Example](#ExamplesCachesFlush)| +|[az storagecache cache start](#CachesStart)|Start|[Parameters](#ParametersCachesStart)|[Example](#ExamplesCachesStart)| +|[az storagecache cache stop](#CachesStop)|Stop|[Parameters](#ParametersCachesStop)|[Example](#ExamplesCachesStop)| +|[az storagecache cache upgrade-firmware](#CachesUpgradeFirmware)|UpgradeFirmware|[Parameters](#ParametersCachesUpgradeFirmware)|[Example](#ExamplesCachesUpgradeFirmware)| + +### Commands in `az storagecache sku` group +|CLI Command|Operation Swagger name|Parameters|Examples| +|---------|------------|--------|-----------| +|[az storagecache sku list](#SkusList)|List|[Parameters](#ParametersSkusList)|[Example](#ExamplesSkusList)| + +### Commands in `az storagecache storage-target` group +|CLI Command|Operation Swagger name|Parameters|Examples| +|---------|------------|--------|-----------| +|[az storagecache storage-target 
list](#StorageTargetsListByCache)|ListByCache|[Parameters](#ParametersStorageTargetsListByCache)|[Example](#ExamplesStorageTargetsListByCache)| +|[az storagecache storage-target show](#StorageTargetsGet)|Get|[Parameters](#ParametersStorageTargetsGet)|[Example](#ExamplesStorageTargetsGet)| +|[az storagecache storage-target create](#StorageTargetsCreateOrUpdate#Create)|CreateOrUpdate#Create|[Parameters](#ParametersStorageTargetsCreateOrUpdate#Create)|[Example](#ExamplesStorageTargetsCreateOrUpdate#Create)| +|[az storagecache storage-target update](#StorageTargetsCreateOrUpdate#Update)|CreateOrUpdate#Update|[Parameters](#ParametersStorageTargetsCreateOrUpdate#Update)|Not Found| +|[az storagecache storage-target delete](#StorageTargetsDelete)|Delete|[Parameters](#ParametersStorageTargetsDelete)|[Example](#ExamplesStorageTargetsDelete)| + +### Commands in `az storagecache usage-model` group +|CLI Command|Operation Swagger name|Parameters|Examples| +|---------|------------|--------|-----------| +|[az storagecache usage-model list](#UsageModelsList)|List|[Parameters](#ParametersUsageModelsList)|[Example](#ExamplesUsageModelsList)| + + +## COMMAND DETAILS + +### group `az storagecache asc-operation` +#### Command `az storagecache asc-operation show` + +##### Example +``` +az storagecache asc-operation show --operation-id "testoperationid" --location "West US" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--location**|string|The region name which the operation will lookup into.|location|location| +|**--operation-id**|string|The operation id which uniquely identifies the asynchronous operation.|operation_id|operationId| + +### group `az storagecache cache` +#### Command `az storagecache cache list` + +##### Example +``` +az storagecache cache list --resource-group "scgroup" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| 
+|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| + +#### Command `az storagecache cache list` + +##### Example +``` +az storagecache cache list +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +#### Command `az storagecache cache show` + +##### Example +``` +az storagecache cache show --cache-name "sc1" --resource-group "scgroup" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| + +#### Command `az storagecache cache create` + +##### Example +``` +az storagecache cache create --location "westus" --cache-size-gb 3072 --subnet "/subscriptions/00000000-0000-0000-0000-\ +000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" --sku-name \ +"Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. 
Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| +|**--tags**|any|ARM tags as name/value pairs.|tags|tags| +|**--location**|string|Region name string.|location|location| +|**--cache-size-gb**|integer|The size of this Cache, in GB.|cache_size_gb|cacheSizeGB| +|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state|provisioningState| +|**--subnet**|string|Subnet used for the Cache.|subnet|subnet| +|**--security-settings-root-squash**|boolean|root squash of cache property.|root_squash|rootSquash| +|**--encryption-settings-key-encryption-key**|object|Specifies the location of the key encryption key in Key Vault.|key_encryption_key|keyEncryptionKey| +|**--network-settings-mtu**|integer|The IPv4 maximum transmission unit configured for the subnet.|mtu|mtu| +|**--sku-name**|string|SKU name for this Cache.|name|name| +|**--identity-type**|sealed-choice|The type of identity used for the cache|type|type| + +#### Command `az storagecache cache update` + +##### Example +``` +az storagecache cache update --location "westus" --cache-size-gb 3072 --subnet "/subscriptions/00000000-0000-0000-0000-\ +000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" --sku-name \ +"Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. 
Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| +|**--tags**|any|ARM tags as name/value pairs.|tags|tags| +|**--location**|string|Region name string.|location|location| +|**--cache-size-gb**|integer|The size of this Cache, in GB.|cache_size_gb|cacheSizeGB| +|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state|provisioningState| +|**--subnet**|string|Subnet used for the Cache.|subnet|subnet| +|**--security-settings-root-squash**|boolean|root squash of cache property.|root_squash|rootSquash| +|**--encryption-settings-key-encryption-key**|object|Specifies the location of the key encryption key in Key Vault.|key_encryption_key|keyEncryptionKey| +|**--network-settings-mtu**|integer|The IPv4 maximum transmission unit configured for the subnet.|mtu|mtu| +|**--sku-name**|string|SKU name for this Cache.|name|name| +|**--identity-type**|sealed-choice|The type of identity used for the cache|type|type| + +#### Command `az storagecache cache delete` + +##### Example +``` +az storagecache cache delete --cache-name "sc" --resource-group "scgroup" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. 
Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| + +#### Command `az storagecache cache flush` + +##### Example +``` +az storagecache cache flush --cache-name "sc" --resource-group "scgroup" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| + +#### Command `az storagecache cache start` + +##### Example +``` +az storagecache cache start --cache-name "sc" --resource-group "scgroup" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| + +#### Command `az storagecache cache stop` + +##### Example +``` +az storagecache cache stop --cache-name "sc" --resource-group "scgroup" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. 
Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| + +#### Command `az storagecache cache upgrade-firmware` + +##### Example +``` +az storagecache cache upgrade-firmware --cache-name "sc1" --resource-group "scgroup" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| + +### group `az storagecache sku` +#### Command `az storagecache sku list` + +##### Example +``` +az storagecache sku list +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +### group `az storagecache storage-target` +#### Command `az storagecache storage-target list` + +##### Example +``` +az storagecache storage-target list --cache-name "sc1" --resource-group "scgroup" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| + +#### Command `az storagecache storage-target show` + +##### Example +``` +az storagecache storage-target show --cache-name "sc1" --resource-group "scgroup" --name "st1" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. 
Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| +|**--storage-target-name**|string|Name of the Storage Target. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|storage_target_name|storageTargetName| + +#### Command `az storagecache storage-target create` + +##### Example +``` +az storagecache storage-target create --cache-name "sc1" --resource-group "scgroup" --name "st1" --junctions \ +namespace-path="/path/on/cache" nfs-export="exp1" target-path="/path/on/exp1" --junctions \ +namespace-path="/path2/on/cache" nfs-export="exp2" target-path="/path2/on/exp2" --nfs3 target="10.0.44.44" \ +usage-model="READ_HEAVY_INFREQ" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| +|**--storage-target-name**|string|Name of the Storage Target. 
Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|storage_target_name|storageTargetName| +|**--junctions**|array|List of Cache namespace junctions to target for namespace associations.|junctions|junctions| +|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state|provisioningState| +|**--nfs3**|object|Properties when targetType is nfs3.|nfs3|nfs3| +|**--unknown-unknown-map**|dictionary|Dictionary of string->string pairs containing information about the Storage Target.|unknown_map|unknownMap| +|**--clfs-target**|string|Resource ID of storage container.|target|target| + +#### Command `az storagecache storage-target update` + +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName| +|**--storage-target-name**|string|Name of the Storage Target. 
Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|storage_target_name|storageTargetName| +|**--junctions**|array|List of Cache namespace junctions to target for namespace associations.|junctions|junctions| +|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state|provisioningState| +|**--nfs3**|object|Properties when targetType is nfs3.|nfs3|nfs3| +|**--unknown-unknown-map**|dictionary|Dictionary of string->string pairs containing information about the Storage Target.|unknown_map|unknownMap| +|**--clfs-target**|string|Resource ID of storage container.|target|target| + +#### Command `az storagecache storage-target delete` + +##### Example +``` +az storagecache storage-target delete --cache-name "sc1" --resource-group "scgroup" --name "st1" +``` +##### Parameters +|Option|Type|Description|Path (SDK)|Swagger name| +|------|----|-----------|----------|------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName| +|**--cache-name**|string|Name of Cache. 
Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+|**--storage-target-name**|string|Name of Storage Target.|storage_target_name|storageTargetName|
+
+### group `az storagecache usage-model`
+#### Command `az storagecache usage-model list`
+
+##### Example
+```
+az storagecache usage-model list
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
\ No newline at end of file
diff --git a/src/storagecache/setup.cfg b/src/storagecache/setup.cfg
new file mode 100644
index 00000000000..2fdd96e5d39
--- /dev/null
+++ b/src/storagecache/setup.cfg
@@ -0,0 +1 @@
+#setup.cfg
\ No newline at end of file
diff --git a/src/storagecache/setup.py b/src/storagecache/setup.py
new file mode 100644
index 00000000000..8f359711a2e
--- /dev/null
+++ b/src/storagecache/setup.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+
+from codecs import open
+from setuptools import setup, find_packages
+
+# Extension version number; keep it in sync with the HISTORY.rst entry.
+VERSION = '0.1.0' +try: + from azext_storagecache.manual.version import VERSION +except ImportError: + pass + +# The full list of classifiers is available at +# https://pypi.python.org/pypi?%3Aaction=list_classifiers +CLASSIFIERS = [ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'License :: OSI Approved :: MIT License', +] + +DEPENDENCIES = [] + +try: + from azext_storagecache.manual.dependency import DEPENDENCIES +except ImportError: + pass + +with open('README.md', 'r', encoding='utf-8') as f: + README = f.read() +with open('HISTORY.rst', 'r', encoding='utf-8') as f: + HISTORY = f.read() + +setup( + name='storagecache', + version=VERSION, + description='Microsoft Azure Command-Line Tools StorageCacheManagementClient Extension', + author='Microsoft Corporation', + author_email='azpycli@microsoft.com', + url='https://github.com/Azure/azure-cli-extensions/tree/master/src/storagecache', + long_description=README + '\n\n' + HISTORY, + license='MIT', + classifiers=CLASSIFIERS, + packages=find_packages(), + install_requires=DEPENDENCIES, + package_data={'azext_storagecache': ['azext_metadata.json']}, +)