diff --git a/azurerm/internal/services/recoveryservices/client/client.go b/azurerm/internal/services/recoveryservices/client/client.go index 73861b567610..b4f73810d745 100644 --- a/azurerm/internal/services/recoveryservices/client/client.go +++ b/azurerm/internal/services/recoveryservices/client/client.go @@ -8,15 +8,17 @@ import ( ) type Client struct { - ProtectedItemsClient *backup.ProtectedItemsClient - ProtectionPoliciesClient *backup.ProtectionPoliciesClient - VaultsClient *recoveryservices.VaultsClient - FabricClient func(resourceGroupName string, vaultName string) siterecovery.ReplicationFabricsClient - ProtectionContainerClient func(resourceGroupName string, vaultName string) siterecovery.ReplicationProtectionContainersClient - ReplicationPoliciesClient func(resourceGroupName string, vaultName string) siterecovery.ReplicationPoliciesClient - ContainerMappingClient func(resourceGroupName string, vaultName string) siterecovery.ReplicationProtectionContainerMappingsClient - NetworkMappingClient func(resourceGroupName string, vaultName string) siterecovery.ReplicationNetworkMappingsClient - ReplicationMigrationItemsClient func(resourceGroupName string, vaultName string) siterecovery.ReplicationProtectedItemsClient + ProtectedItemsClient *backup.ProtectedItemsClient + ProtectionPoliciesClient *backup.ProtectionPoliciesClient + BackupProtectionContainersClient *backup.ProtectionContainersClient + BackupOperationStatusesClient *backup.OperationStatusesClient + VaultsClient *recoveryservices.VaultsClient + FabricClient func(resourceGroupName string, vaultName string) siterecovery.ReplicationFabricsClient + ProtectionContainerClient func(resourceGroupName string, vaultName string) siterecovery.ReplicationProtectionContainersClient + ReplicationPoliciesClient func(resourceGroupName string, vaultName string) siterecovery.ReplicationPoliciesClient + ContainerMappingClient func(resourceGroupName string, vaultName string) 
siterecovery.ReplicationProtectionContainerMappingsClient + NetworkMappingClient func(resourceGroupName string, vaultName string) siterecovery.ReplicationNetworkMappingsClient + ReplicationMigrationItemsClient func(resourceGroupName string, vaultName string) siterecovery.ReplicationProtectedItemsClient } func NewClient(o *common.ClientOptions) *Client { @@ -29,6 +31,12 @@ func NewClient(o *common.ClientOptions) *Client { protectionPoliciesClient := backup.NewProtectionPoliciesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&protectionPoliciesClient.Client, o.ResourceManagerAuthorizer) + backupProtectionContainersClient := backup.NewProtectionContainersClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&backupProtectionContainersClient.Client, o.ResourceManagerAuthorizer) + + backupOperationStatusesClient := backup.NewOperationStatusesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&backupOperationStatusesClient.Client, o.ResourceManagerAuthorizer) + fabricClient := func(resourceGroupName string, vaultName string) siterecovery.ReplicationFabricsClient { client := siterecovery.NewReplicationFabricsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId, resourceGroupName, vaultName) o.ConfigureClient(&client.Client, o.ResourceManagerAuthorizer) @@ -66,14 +74,16 @@ func NewClient(o *common.ClientOptions) *Client { } return &Client{ - ProtectedItemsClient: &protectedItemsClient, - ProtectionPoliciesClient: &protectionPoliciesClient, - VaultsClient: &vaultsClient, - FabricClient: fabricClient, - ProtectionContainerClient: protectionContainerClient, - ReplicationPoliciesClient: replicationPoliciesClient, - ContainerMappingClient: containerMappingClient, - NetworkMappingClient: networkMappingClient, - ReplicationMigrationItemsClient: replicationMigrationItemsClient, + ProtectedItemsClient: &protectedItemsClient, + ProtectionPoliciesClient: &protectionPoliciesClient, + 
BackupProtectionContainersClient: &backupProtectionContainersClient, + BackupOperationStatusesClient: &backupOperationStatusesClient, + VaultsClient: &vaultsClient, + FabricClient: fabricClient, + ProtectionContainerClient: protectionContainerClient, + ReplicationPoliciesClient: replicationPoliciesClient, + ContainerMappingClient: containerMappingClient, + NetworkMappingClient: networkMappingClient, + ReplicationMigrationItemsClient: replicationMigrationItemsClient, } } diff --git a/azurerm/provider.go b/azurerm/provider.go index bbe84d9b6829..54163589ed1d 100644 --- a/azurerm/provider.go +++ b/azurerm/provider.go @@ -217,6 +217,9 @@ func Provider() terraform.ResourceProvider { "azurerm_azuread_application": resourceArmActiveDirectoryApplication(), "azurerm_azuread_service_principal_password": resourceArmActiveDirectoryServicePrincipalPassword(), "azurerm_azuread_service_principal": resourceArmActiveDirectoryServicePrincipal(), + "azurerm_backup_container_storage_account": resourceArmBackupProtectionContainerStorageAccount(), + "azurerm_backup_policy_file_share": resourceArmBackupProtectionPolicyFileShare(), + "azurerm_backup_protected_file_share": resourceArmBackupProtectedFileShare(), "azurerm_backup_protected_vm": resourceArmRecoveryServicesBackupProtectedVM(), "azurerm_backup_policy_vm": resourceArmBackupProtectionPolicyVM(), "azurerm_bastion_host": resourceArmBastionHost(), diff --git a/azurerm/resource_arm_backup_container_storage_account.go b/azurerm/resource_arm_backup_container_storage_account.go new file mode 100644 index 000000000000..fed45d389c15 --- /dev/null +++ b/azurerm/resource_arm_backup_container_storage_account.go @@ -0,0 +1,250 @@ +package azurerm + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmBackupProtectionContainerStorageAccount() *schema.Resource { + return &schema.Resource{ + Create: resourceArmBackupProtectionContainerStorageAccountCreate, + Read: resourceArmBackupProtectionContainerStorageAccountRead, + Update: nil, + Delete: resourceArmBackupProtectionContainerStorageAccountDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "resource_group_name": azure.SchemaResourceGroupName(), + + "recovery_vault_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateRecoveryServicesVaultName, + }, + "storage_account_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + }, + } +} + +func resourceArmBackupProtectionContainerStorageAccountCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).RecoveryServices.BackupProtectionContainersClient + opStatusClient := meta.(*ArmClient).RecoveryServices.BackupOperationStatusesClient + ctx, cancel := timeouts.ForRead(meta.(*ArmClient).StopContext, d) + defer cancel() + + resGroup := d.Get("resource_group_name").(string) + vaultName := d.Get("recovery_vault_name").(string) + storageAccountID := d.Get("storage_account_id").(string) + 
+ parsedStorageAccountID, err := azure.ParseAzureResourceID(storageAccountID) + if err != nil { + return fmt.Errorf("[ERROR] Unable to parse storage_account_id '%s': %+v", storageAccountID, err) + } + accountName, hasName := parsedStorageAccountID.Path["storageAccounts"] + if !hasName { + return fmt.Errorf("[ERROR] parsed storage_account_id '%s' doesn't contain 'storageAccounts'", storageAccountID) + } + + containerName := fmt.Sprintf("StorageContainer;storage;%s;%s", parsedStorageAccountID.ResourceGroup, accountName) + + if features.ShouldResourcesBeImported() && d.IsNewResource() { + existing, err := client.Get(ctx, vaultName, resGroup, "Azure", containerName) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing recovery services protection container %s (Vault %s): %+v", containerName, vaultName, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_backup_protection_container_storage", azure.HandleAzureSdkForGoBug2824(*existing.ID)) + } + } + + parameters := backup.ProtectionContainerResource{ + Properties: &backup.AzureStorageContainer{ + SourceResourceID: &storageAccountID, + FriendlyName: &accountName, + BackupManagementType: backup.ManagementTypeAzureStorage, + ContainerType: backup.ContainerTypeStorageContainer1, + }, + } + + resp, err := client.Register(ctx, vaultName, resGroup, "Azure", containerName, parameters) + if err != nil { + return fmt.Errorf("Error registering backup protection container %s (Vault %s): %+v", containerName, vaultName, err) + } + + locationURL, err := resp.Response.Location() // Operation ID found in the Location header + if locationURL == nil || err != nil { + return fmt.Errorf("Unable to determine operation URL for protection container registration status for %s. 
(Vault %s): Location header missing or empty", containerName, vaultName) + } + + opResourceID := azure.HandleAzureSdkForGoBug2824(locationURL.Path) + + parsedLocation, err := azure.ParseAzureResourceID(opResourceID) + if err != nil { + return err + } + + operationID := parsedLocation.Path["operationResults"] + if _, err = resourceArmBackupProtectionContainerStorageAccountWaitForOperation(ctx, opStatusClient, vaultName, resGroup, operationID, d); err != nil { + return err + } + + resp, err = client.Get(ctx, vaultName, resGroup, "Azure", containerName) + if err != nil { + return fmt.Errorf("Error retrieving site recovery protection container %s (Vault %s): %+v", containerName, vaultName, err) + } + + d.SetId(azure.HandleAzureSdkForGoBug2824(*resp.ID)) + + return resourceArmBackupProtectionContainerStorageAccountRead(d, meta) +} + +func resourceArmBackupProtectionContainerStorageAccountRead(d *schema.ResourceData, meta interface{}) error { + id, err := azure.ParseAzureResourceID(d.Id()) + if err != nil { + return err + } + + resGroup := id.ResourceGroup + vaultName := id.Path["vaults"] + fabricName := id.Path["backupFabrics"] + containerName := id.Path["protectionContainers"] + + client := meta.(*ArmClient).RecoveryServices.BackupProtectionContainersClient + ctx, cancel := timeouts.ForRead(meta.(*ArmClient).StopContext, d) + defer cancel() + + resp, err := client.Get(ctx, vaultName, resGroup, fabricName, containerName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + return fmt.Errorf("Error making Read request on backup protection container %s (Vault %s): %+v", containerName, vaultName, err) + } + + d.Set("resource_group_name", resGroup) + d.Set("recovery_vault_name", vaultName) + + if properties, ok := resp.Properties.AsAzureStorageContainer(); ok && properties != nil { + d.Set("storage_account_id", properties.SourceResourceID) + } + + return nil +} + +func resourceArmBackupProtectionContainerStorageAccountDelete(d 
*schema.ResourceData, meta interface{}) error { + id, err := azure.ParseAzureResourceID(d.Id()) + if err != nil { + return err + } + + resGroup := id.ResourceGroup + vaultName := id.Path["vaults"] + fabricName := id.Path["backupFabrics"] + containerName := id.Path["protectionContainers"] + + client := meta.(*ArmClient).RecoveryServices.BackupProtectionContainersClient + opClient := meta.(*ArmClient).RecoveryServices.BackupOperationStatusesClient + ctx, cancel := timeouts.ForDelete(meta.(*ArmClient).StopContext, d) + defer cancel() + + resp, err := client.Unregister(ctx, vaultName, resGroup, fabricName, containerName) + if err != nil { + return fmt.Errorf("Error deregistering backup protection container %s (Vault %s): %+v", containerName, vaultName, err) + } + + locationURL, err := resp.Response.Location() + if err != nil || locationURL == nil { + return fmt.Errorf("Error unregistering backup protection container %s (Vault %s): Location header missing or empty", containerName, vaultName) + } + + opResourceID := azure.HandleAzureSdkForGoBug2824(locationURL.Path) + + parsedLocation, err := azure.ParseAzureResourceID(opResourceID) + if err != nil { + return err + } + operationID := parsedLocation.Path["backupOperationResults"] + + if _, err = resourceArmBackupProtectionContainerStorageAccountWaitForOperation(ctx, opClient, vaultName, resGroup, operationID, d); err != nil { + return err + } + + return nil +} + +func resourceArmBackupProtectionContainerStorageAccountWaitForOperation(ctx context.Context, client *backup.OperationStatusesClient, vaultName, resourceGroup, operationID string, d *schema.ResourceData) (backup.OperationStatus, error) { + state := &resource.StateChangeConf{ + MinTimeout: 10 * time.Second, + Delay: 10 * time.Second, + Pending: []string{"InProgress"}, + Target: []string{"Succeeded"}, + Refresh: resourceArmBackupProtectionContainerStorageAccountCheckOperation(ctx, client, vaultName, resourceGroup, operationID), + ContinuousTargetOccurence: 5, // 
Without this buffer, file share backups and storage account deletions may fail if performed immediately after creating/destroying the container + } + + if features.SupportsCustomTimeouts() { + if d.IsNewResource() { + state.Timeout = d.Timeout(schema.TimeoutCreate) + } else { + state.Timeout = d.Timeout(schema.TimeoutUpdate) + } + } else { + state.Timeout = 30 * time.Minute + } + + log.Printf("[DEBUG] Waiting for backup container operation %q (Vault %q) to complete", operationID, vaultName) + resp, err := state.WaitForState() + if err != nil { + return resp.(backup.OperationStatus), err + } + return resp.(backup.OperationStatus), nil +} + +func resourceArmBackupProtectionContainerStorageAccountCheckOperation(ctx context.Context, client *backup.OperationStatusesClient, vaultName, resourceGroup, operationID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := client.Get(ctx, vaultName, resourceGroup, operationID) + if err != nil { + return resp, "Error", fmt.Errorf("Error making Read request on Recovery Service Protection Container operation %q (Vault %q in Resource Group %q): %+v", operationID, vaultName, resourceGroup, err) + } + + if opErr := resp.Error; opErr != nil { + errMsg := "No upstream error message" + if opErr.Message != nil { + errMsg = *opErr.Message + } + err = fmt.Errorf("Recovery Service Protection Container operation status failed with status %q (Vault %q Resource Group %q Operation ID %q): %+v", resp.Status, vaultName, resourceGroup, operationID, errMsg) + } + + return resp, string(resp.Status), err + } +} diff --git a/azurerm/resource_arm_backup_container_storage_account_test.go b/azurerm/resource_arm_backup_container_storage_account_test.go new file mode 100644 index 000000000000..131de1c988b1 --- /dev/null +++ b/azurerm/resource_arm_backup_container_storage_account_test.go @@ -0,0 +1,129 @@ +package azurerm + +import ( + "fmt" + "net/http" + "testing" + + 
"github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" +) + +func TestAccAzureRMBackupProtectionContainerStorageAccount_basic(t *testing.T) { + resourceGroupName := "azurerm_resource_group.testrg" + vaultName := "azurerm_recovery_services_vault.testvlt" + storageAccountName := "azurerm_storage_account.testsa" + resourceName := "azurerm_backup_container_storage_account.test" + ri := tf.AccRandTimeInt() + rs := acctest.RandString(4) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMResourceGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMBackupProtectionContainerStorageAccount(ri, rs, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMBackupProtectionContainerStorageAccount(resourceGroupName, vaultName, storageAccountName, resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAzureRMBackupProtectionContainerStorageAccount(rInt int, rString string, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "testrg" { + name = "acctestRG-backup-%d" + location = "%s" +} + +resource "azurerm_recovery_services_vault" "testvlt" { + name = "acctest-vault-%d" + location = "${azurerm_resource_group.testrg.location}" + resource_group_name = "${azurerm_resource_group.testrg.name}" + sku = "Standard" +} + +resource "azurerm_storage_account" "testsa" { + name = "unlikely23exst2acct%s" + resource_group_name = "${azurerm_resource_group.testrg.name}" + + location = "${azurerm_resource_group.testrg.location}" + account_tier = "Standard" + 
account_replication_type = "LRS" +} + +resource "azurerm_backup_container_storage_account" "test" { + resource_group_name = "${azurerm_resource_group.testrg.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.testvlt.name}" + storage_account_id = "${azurerm_storage_account.testsa.id}" +} +`, rInt, location, rInt, rString) +} + +func testCheckAzureRMBackupProtectionContainerStorageAccount(resourceGroupStateName, vaultStateName, storageAccountName, resourceStateName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + resourceGroupState, ok := s.RootModule().Resources[resourceGroupStateName] + if !ok { + return fmt.Errorf("Not found: %s", resourceGroupStateName) + } + vaultState, ok := s.RootModule().Resources[vaultStateName] + if !ok { + return fmt.Errorf("Not found: %s", vaultStateName) + } + storageState, ok := s.RootModule().Resources[storageAccountName] + if !ok { + return fmt.Errorf("Not found: %s", storageAccountName) + } + protectionContainerState, ok := s.RootModule().Resources[resourceStateName] + if !ok { + return fmt.Errorf("Not found: %s", resourceStateName) + } + + resourceGroupName := resourceGroupState.Primary.Attributes["name"] + vaultName := vaultState.Primary.Attributes["name"] + storageAccountID := storageState.Primary.Attributes["id"] + resourceStorageID := protectionContainerState.Primary.Attributes["storage_account_id"] + + if storageAccountID != resourceStorageID { + return fmt.Errorf("Bad: Container resource's storage_account_id %q does not match storage account resource's ID %q", storageAccountID, resourceStorageID) + } + + parsedStorageAccountID, err := azure.ParseAzureResourceID(storageAccountID) + if err != nil { + return fmt.Errorf("Bad: Unable to parse storage_account_id '%s': %+v", storageAccountID, err) + } + accountName, hasName := parsedStorageAccountID.Path["storageAccounts"] + if !hasName { + return fmt.Errorf("Bad: Parsed 
storage_account_id '%s' doesn't contain 'storageAccounts'", storageAccountID) + } + + containerName := fmt.Sprintf("StorageContainer;storage;%s;%s", parsedStorageAccountID.ResourceGroup, accountName) + + // Ensure container exists in API + client := testAccProvider.Meta().(*ArmClient).RecoveryServices.BackupProtectionContainersClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + resp, err := client.Get(ctx, vaultName, resourceGroupName, "Azure", containerName) + if err != nil { + return fmt.Errorf("Bad: Get on protection container: %+v", err) + } + + if resp.Response.StatusCode == http.StatusNotFound { + return fmt.Errorf("Bad: container: %q does not exist", containerName) + } + + return nil + } +} diff --git a/azurerm/resource_arm_backup_policy_file_share.go b/azurerm/resource_arm_backup_policy_file_share.go new file mode 100644 index 000000000000..b0db576aacfc --- /dev/null +++ b/azurerm/resource_arm_backup_policy_file_share.go @@ -0,0 +1,379 @@ +package azurerm + +import ( + "context" + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup" + "github.com/Azure/go-autorest/autorest/date" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmBackupProtectionPolicyFileShare() *schema.Resource { + return &schema.Resource{ + 
Create: resourceArmBackupProtectionPolicyFileShareCreateUpdate, + Read: resourceArmBackupProtectionPolicyFileShareRead, + Update: resourceArmBackupProtectionPolicyFileShareCreateUpdate, + Delete: resourceArmBackupProtectionPolicyFileShareDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile("^[a-zA-Z][-_!a-zA-Z0-9]{2,149}$"), + "Backup Policy name must be 3 - 150 characters long, start with a letter, contain only letters and numbers.", + ), + }, + + "resource_group_name": azure.SchemaResourceGroupName(), + + "recovery_vault_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateRecoveryServicesVaultName, + }, + + "timezone": { + Type: schema.TypeString, + Optional: true, + Default: "UTC", + }, + + "backup": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "frequency": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: suppress.CaseDifference, + ValidateFunc: validation.StringInSlice([]string{ + string(backup.ScheduleRunTypeDaily), + }, false), + }, + + "time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile("^([01][0-9]|[2][0-3]):([03][0])$"), //time must be on the hour or half past + "Time of day must match the format HH:mm where HH is 00-23 and mm is 00 or 30", + ), + }, + }, + }, + }, + + "retention_daily": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 180), + }, + }, + }, + }, + }, + } +} + +func resourceArmBackupProtectionPolicyFileShareCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).RecoveryServices.ProtectionPoliciesClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*ArmClient).StopContext, d) + defer cancel() + + policyName := d.Get("name").(string) + resourceGroup := d.Get("resource_group_name").(string) + vaultName := d.Get("recovery_vault_name").(string) + + log.Printf("[DEBUG] Creating/updating Recovery Service Protection Policy %s (resource group %q)", policyName, resourceGroup) + + //getting this ready now because its shared between *everything*, time is... complicated for this resource + timeOfDay := d.Get("backup.0.time").(string) + dateOfDay, err := time.Parse(time.RFC3339, fmt.Sprintf("2018-07-30T%s:00Z", timeOfDay)) + if err != nil { + return fmt.Errorf("Error generating time from %q for policy %q (Resource Group %q): %+v", timeOfDay, policyName, resourceGroup, err) + } + times := append(make([]date.Time, 0), date.Time{Time: dateOfDay}) + + if features.ShouldResourcesBeImported() && d.IsNewResource() { + existing, err2 := client.Get(ctx, vaultName, resourceGroup, policyName) + if err2 != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Recovery Service Protection Policy %q (Resource Group %q): %+v", policyName, resourceGroup, err2) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_backup_policy_file_share", *existing.ID) + } + } + + policy := backup.ProtectionPolicyResource{ + Properties: &backup.AzureFileShareProtectionPolicy{ + TimeZone: utils.String(d.Get("timezone").(string)), + BackupManagementType: backup.BackupManagementTypeAzureStorage, + WorkLoadType: backup.WorkloadTypeAzureFileShare, + 
SchedulePolicy: expandArmBackupProtectionPolicyFileShareSchedule(d, times), + RetentionPolicy: &backup.LongTermRetentionPolicy{ //SimpleRetentionPolicy only has duration property ¯\_(ツ)_/¯ + RetentionPolicyType: backup.RetentionPolicyTypeLongTermRetentionPolicy, + DailySchedule: expandArmBackupProtectionPolicyFileShareRetentionDaily(d, times), + }, + }, + } + if _, err = client.CreateOrUpdate(ctx, vaultName, resourceGroup, policyName, policy); err != nil { + return fmt.Errorf("Error creating/updating Recovery Service Protection Policy %q (Resource Group %q): %+v", policyName, resourceGroup, err) + } + + resp, err := resourceArmBackupProtectionPolicyFileShareWaitForUpdate(ctx, client, vaultName, resourceGroup, policyName, d) + if err != nil { + return err + } + + id := strings.Replace(*resp.ID, "Subscriptions", "subscriptions", 1) + d.SetId(id) + + return resourceArmBackupProtectionPolicyFileShareRead(d, meta) +} + +func resourceArmBackupProtectionPolicyFileShareRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).RecoveryServices.ProtectionPoliciesClient + ctx, cancel := timeouts.ForRead(meta.(*ArmClient).StopContext, d) + defer cancel() + + id, err := azure.ParseAzureResourceID(d.Id()) + if err != nil { + return err + } + + policyName := id.Path["backupPolicies"] + vaultName := id.Path["vaults"] + resourceGroup := id.ResourceGroup + + log.Printf("[DEBUG] Reading Recovery Service Protection Policy %q (resource group %q)", policyName, resourceGroup) + + resp, err := client.Get(ctx, vaultName, resourceGroup, policyName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + + return fmt.Errorf("Error making Read request on Recovery Service Protection Policy %q (Resource Group %q): %+v", policyName, resourceGroup, err) + } + + d.Set("name", policyName) + d.Set("resource_group_name", resourceGroup) + d.Set("recovery_vault_name", vaultName) + + if properties, ok := 
resp.Properties.AsAzureFileShareProtectionPolicy(); ok && properties != nil { + d.Set("timezone", properties.TimeZone) + + if schedule, ok := properties.SchedulePolicy.AsSimpleSchedulePolicy(); ok && schedule != nil { + if err := d.Set("backup", flattenArmBackupProtectionPolicyFileShareSchedule(schedule)); err != nil { + return fmt.Errorf("Error setting `backup`: %+v", err) + } + } + + if retention, ok := properties.RetentionPolicy.AsLongTermRetentionPolicy(); ok && retention != nil { + if s := retention.DailySchedule; s != nil { + if err := d.Set("retention_daily", flattenArmBackupProtectionPolicyFileShareRetentionDaily(s)); err != nil { + return fmt.Errorf("Error setting `retention_daily`: %+v", err) + } + } else { + d.Set("retention_daily", nil) + } + } + } + + return nil +} + +func resourceArmBackupProtectionPolicyFileShareDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).RecoveryServices.ProtectionPoliciesClient + ctx, cancel := timeouts.ForDelete(meta.(*ArmClient).StopContext, d) + defer cancel() + + id, err := azure.ParseAzureResourceID(d.Id()) + if err != nil { + return err + } + + policyName := id.Path["backupPolicies"] + resourceGroup := id.ResourceGroup + vaultName := id.Path["vaults"] + + log.Printf("[DEBUG] Deleting Recovery Service Protection Policy %q (resource group %q)", policyName, resourceGroup) + + resp, err := client.Delete(ctx, vaultName, resourceGroup, policyName) + if err != nil { + if !utils.ResponseWasNotFound(resp) { + return fmt.Errorf("Error issuing delete request for Recovery Service Protection Policy %q (Resource Group %q): %+v", policyName, resourceGroup, err) + } + } + + if _, err := resourceArmBackupProtectionPolicyFileShareWaitForDeletion(ctx, client, vaultName, resourceGroup, policyName, d); err != nil { + return err + } + + return nil +} + +func expandArmBackupProtectionPolicyFileShareSchedule(d *schema.ResourceData, times []date.Time) *backup.SimpleSchedulePolicy { + if bb, ok := 
d.Get("backup").([]interface{}); ok && len(bb) > 0 { + block := bb[0].(map[string]interface{}) + + schedule := backup.SimpleSchedulePolicy{ //LongTermSchedulePolicy has no properties + SchedulePolicyType: backup.SchedulePolicyTypeSimpleSchedulePolicy, + ScheduleRunTimes: ×, + } + + if v, ok := block["frequency"].(string); ok { + schedule.ScheduleRunFrequency = backup.ScheduleRunType(v) + } + + return &schedule + } + + return nil +} + +func expandArmBackupProtectionPolicyFileShareRetentionDaily(d *schema.ResourceData, times []date.Time) *backup.DailyRetentionSchedule { + if rb, ok := d.Get("retention_daily").([]interface{}); ok && len(rb) > 0 { + block := rb[0].(map[string]interface{}) + + return &backup.DailyRetentionSchedule{ + RetentionTimes: ×, + RetentionDuration: &backup.RetentionDuration{ + Count: utils.Int32(int32(block["count"].(int))), + DurationType: backup.RetentionDurationTypeDays, + }, + } + } + + return nil +} + +func flattenArmBackupProtectionPolicyFileShareSchedule(schedule *backup.SimpleSchedulePolicy) []interface{} { + block := map[string]interface{}{} + + block["frequency"] = string(schedule.ScheduleRunFrequency) + + if times := schedule.ScheduleRunTimes; times != nil && len(*times) > 0 { + block["time"] = (*times)[0].Format("15:04") + } + + return []interface{}{block} +} + +func flattenArmBackupProtectionPolicyFileShareRetentionDaily(daily *backup.DailyRetentionSchedule) []interface{} { + block := map[string]interface{}{} + + if duration := daily.RetentionDuration; duration != nil { + if v := duration.Count; v != nil { + block["count"] = *v + } + } + + return []interface{}{block} +} + +func resourceArmBackupProtectionPolicyFileShareWaitForUpdate(ctx context.Context, client *backup.ProtectionPoliciesClient, vaultName, resourceGroup, policyName string, d *schema.ResourceData) (backup.ProtectionPolicyResource, error) { + state := &resource.StateChangeConf{ + MinTimeout: 30 * time.Second, + Delay: 10 * time.Second, + Pending: []string{"NotFound"}, + 
Target: []string{"Found"}, + Refresh: resourceArmBackupProtectionPolicyFileShareRefreshFunc(ctx, client, vaultName, resourceGroup, policyName), + } + + if features.SupportsCustomTimeouts() { + if d.IsNewResource() { + state.Timeout = d.Timeout(schema.TimeoutCreate) + } else { + state.Timeout = d.Timeout(schema.TimeoutUpdate) + } + } else { + state.Timeout = 30 * time.Minute + } + + resp, err := state.WaitForState() + if err != nil { + return resp.(backup.ProtectionPolicyResource), fmt.Errorf("Error waiting for the Recovery Service Protection Policy %q to update (Resource Group %q): %+v", policyName, resourceGroup, err) + } + + return resp.(backup.ProtectionPolicyResource), nil +} + +func resourceArmBackupProtectionPolicyFileShareWaitForDeletion(ctx context.Context, client *backup.ProtectionPoliciesClient, vaultName, resourceGroup, policyName string, d *schema.ResourceData) (backup.ProtectionPolicyResource, error) { + state := &resource.StateChangeConf{ + MinTimeout: 30 * time.Second, + Delay: 10 * time.Second, + Pending: []string{"Found"}, + Target: []string{"NotFound"}, + Refresh: resourceArmBackupProtectionPolicyFileShareRefreshFunc(ctx, client, vaultName, resourceGroup, policyName), + } + + if features.SupportsCustomTimeouts() { + state.Timeout = d.Timeout(schema.TimeoutDelete) + } else { + state.Timeout = 30 * time.Minute + } + + resp, err := state.WaitForState() + if err != nil { + return resp.(backup.ProtectionPolicyResource), fmt.Errorf("Error waiting for the Recovery Service Protection Policy %q to be missing (Resource Group %q): %+v", policyName, resourceGroup, err) + } + + return resp.(backup.ProtectionPolicyResource), nil +} + +func resourceArmBackupProtectionPolicyFileShareRefreshFunc(ctx context.Context, client *backup.ProtectionPoliciesClient, vaultName, resourceGroup, policyName string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := client.Get(ctx, vaultName, resourceGroup, policyName) + if err != nil { + if 
utils.ResponseWasNotFound(resp.Response) { + return resp, "NotFound", nil + } + + return resp, "Error", fmt.Errorf("Error making Read request on Recovery Service Protection Policy %q (Resource Group %q): %+v", policyName, resourceGroup, err) + } + + return resp, "Found", nil + } +} diff --git a/azurerm/resource_arm_backup_policy_file_share_test.go b/azurerm/resource_arm_backup_policy_file_share_test.go new file mode 100644 index 000000000000..a3df455d3a42 --- /dev/null +++ b/azurerm/resource_arm_backup_policy_file_share_test.go @@ -0,0 +1,254 @@ +package azurerm + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func TestAccAzureRMBackupProtectionPolicyFileShare_basicDaily(t *testing.T) { + resourceName := "azurerm_backup_policy_file_share.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMBackupProtectionPolicyFileShareDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMBackupProtectionPolicyFileShare_basicDaily(ri, testLocation()), + Check: checkAccAzureRMBackupProtectionPolicyFileShare_basicDaily(resourceName, ri), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAzureRMBackupProtectionPolicyFileShare_requiresImport(t *testing.T) { + if !features.ShouldResourcesBeImported() { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_backup_policy_file_share.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMBackupProtectionPolicyFileShareDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMBackupProtectionPolicyFileShare_basicDaily(ri, testLocation()), + Check: checkAccAzureRMBackupProtectionPolicyFileShare_basicDaily(resourceName, ri), + }, + { + Config: testAccAzureRMBackupProtectionPolicyFileShare_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_backup_policy_file_share"), + }, + }, + }) +} + +func TestAccAzureRMBackupProtectionPolicyFileShare_updateDaily(t *testing.T) { + resourceName := "azurerm_backup_policy_file_share.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMBackupProtectionPolicyFileShareDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMBackupProtectionPolicyFileShare_basicDaily(ri, testLocation()), + Check: checkAccAzureRMBackupProtectionPolicyFileShare_basicDaily(resourceName, ri), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAzureRMBackupProtectionPolicyFileShare_updateDaily(ri, testLocation()), + Check: checkAccAzureRMBackupProtectionPolicyFileShare_updateDaily(resourceName, ri), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testCheckAzureRMBackupProtectionPolicyFileShareDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).RecoveryServices.ProtectionPoliciesClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_backup_policy_file_share" { + continue + } + + resourceGroup := rs.Primary.Attributes["resource_group_name"] + vaultName := rs.Primary.Attributes["recovery_vault_name"] + 
policyName := rs.Primary.Attributes["name"] + + resp, err := client.Get(ctx, vaultName, resourceGroup, policyName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return nil + } + + return err + } + + return fmt.Errorf("Recovery Services Vault Policy still exists:\n%#v", resp) + } + + return nil +} + +func testCheckAzureRMBackupProtectionPolicyFileShareExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := testAccProvider.Meta().(*ArmClient).RecoveryServices.ProtectionPoliciesClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %q", resourceName) + } + + vaultName := rs.Primary.Attributes["recovery_vault_name"] + policyName := rs.Primary.Attributes["name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for Recovery Services Vault %q Policy: %q", vaultName, policyName) + } + + resp, err := client.Get(ctx, vaultName, resourceGroup, policyName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Recovery Services Vault Policy %q (resource group: %q) was not found: %+v", policyName, resourceGroup, err) + } + + return fmt.Errorf("Bad: Get on recoveryServicesVaultsClient: %+v", err) + } + + return nil + } +} + +func testAccAzureRMBackupProtectionPolicyFileShare_base(rInt int, location string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-backup-%[1]d" + location = "%[2]s" +} + +resource "azurerm_recovery_services_vault" "test" { + name = "acctest-RSV-%[1]d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + sku = "Standard" +} +`, rInt, location, 
strconv.Itoa(rInt)[12:17]) +} + +func testAccAzureRMBackupProtectionPolicyFileShare_basicDaily(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_backup_policy_file_share" "test" { + name = "acctest-PFS-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.test.name}" + + backup { + frequency = "Daily" + time = "23:00" + } + + retention_daily { + count = 10 + } +} +`, testAccAzureRMBackupProtectionPolicyFileShare_base(rInt, location), rInt) +} + +func testAccAzureRMBackupProtectionPolicyFileShare_updateDaily(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_backup_policy_file_share" "test" { + name = "acctest-PFS-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.test.name}" + + backup { + frequency = "Daily" + time = "23:30" + } + + retention_daily { + count = 180 + } +} +`, testAccAzureRMBackupProtectionPolicyFileShare_base(rInt, location), rInt) +} + +func testAccAzureRMBackupProtectionPolicyFileShare_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_backup_policy_file_share" "import" { + name = "${azurerm_backup_policy_file_share.test.name}" + resource_group_name = "${azurerm_backup_policy_file_share.test.resource_group_name}" + recovery_vault_name = "${azurerm_backup_policy_file_share.test.recovery_vault_name}" + + backup { + frequency = "Daily" + time = "23:00" + } + + retention_daily { + count = 10 + } +} +`, testAccAzureRMBackupProtectionPolicyFileShare_basicDaily(rInt, location)) +} + +func checkAccAzureRMBackupProtectionPolicyFileShare_basicDaily(resourceName string, ri int) resource.TestCheckFunc { + return resource.ComposeAggregateTestCheckFunc( + testCheckAzureRMBackupProtectionPolicyFileShareExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", 
fmt.Sprintf("acctest-PFS-%d", ri)), + resource.TestCheckResourceAttr(resourceName, "resource_group_name", fmt.Sprintf("acctestRG-backup-%d", ri)), + resource.TestCheckResourceAttr(resourceName, "recovery_vault_name", fmt.Sprintf("acctest-RSV-%d", ri)), + resource.TestCheckResourceAttr(resourceName, "backup.0.frequency", "Daily"), + resource.TestCheckResourceAttr(resourceName, "backup.0.time", "23:00"), + resource.TestCheckResourceAttr(resourceName, "retention_daily.0.count", "10"), + ) +} + +func checkAccAzureRMBackupProtectionPolicyFileShare_updateDaily(resourceName string, ri int) resource.TestCheckFunc { + return resource.ComposeAggregateTestCheckFunc( + testCheckAzureRMBackupProtectionPolicyFileShareExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("acctest-PFS-%d", ri)), + resource.TestCheckResourceAttr(resourceName, "resource_group_name", fmt.Sprintf("acctestRG-backup-%d", ri)), + resource.TestCheckResourceAttr(resourceName, "recovery_vault_name", fmt.Sprintf("acctest-RSV-%d", ri)), + resource.TestCheckResourceAttr(resourceName, "backup.0.frequency", "Daily"), + resource.TestCheckResourceAttr(resourceName, "backup.0.time", "23:30"), + resource.TestCheckResourceAttr(resourceName, "retention_daily.0.count", "180"), + ) +} diff --git a/azurerm/resource_arm_backup_protected_file_share.go b/azurerm/resource_arm_backup_protected_file_share.go new file mode 100644 index 000000000000..76dc146740a2 --- /dev/null +++ b/azurerm/resource_arm_backup_protected_file_share.go @@ -0,0 +1,292 @@ +package azurerm + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/services/recoveryservices/mgmt/2017-07-01/backup" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmBackupProtectedFileShare() *schema.Resource { + return &schema.Resource{ + Create: resourceArmBackupProtectedFileShareCreateUpdate, + Read: resourceArmBackupProtectedFileShareRead, + Update: resourceArmBackupProtectedFileShareCreateUpdate, + Delete: resourceArmBackupProtectedFileShareDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(80 * time.Minute), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(80 * time.Minute), + Delete: schema.DefaultTimeout(80 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "resource_group_name": azure.SchemaResourceGroupName(), + + "recovery_vault_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateRecoveryServicesVaultName, + }, + + "source_storage_account_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + + "source_file_share_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArmStorageShareName, + }, + + "backup_policy_id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: azure.ValidateResourceID, + }, + }, + } +} + +func resourceArmBackupProtectedFileShareCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).RecoveryServices.ProtectedItemsClient + opClient := meta.(*ArmClient).RecoveryServices.BackupOperationStatusesClient + ctx, cancel := timeouts.ForCreateUpdate(meta.(*ArmClient).StopContext, d) + defer cancel() + + 
resourceGroup := d.Get("resource_group_name").(string) + + vaultName := d.Get("recovery_vault_name").(string) + storageAccountID := d.Get("source_storage_account_id").(string) + fileShareName := d.Get("source_file_share_name").(string) + policyID := d.Get("backup_policy_id").(string) + + //get storage account name from id + parsedStorageAccountID, err := azure.ParseAzureResourceID(storageAccountID) + if err != nil { + return fmt.Errorf("[ERROR] Unable to parse source_storage_account_id '%s': %+v", storageAccountID, err) + } + accountName, hasName := parsedStorageAccountID.Path["storageAccounts"] + if !hasName { + return fmt.Errorf("[ERROR] parsed source_storage_account_id '%s' doesn't contain 'storageAccounts'", storageAccountID) + } + + protectedItemName := fmt.Sprintf("AzureFileShare;%s", fileShareName) + containerName := fmt.Sprintf("StorageContainer;storage;%s;%s", parsedStorageAccountID.ResourceGroup, accountName) + + log.Printf("[DEBUG] Creating/updating Recovery Service Protected File Share %q (Container Name %q)", protectedItemName, containerName) + + if features.ShouldResourcesBeImported() && d.IsNewResource() { + existing, err2 := client.Get(ctx, vaultName, resourceGroup, "Azure", containerName, protectedItemName, "") + if err2 != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Recovery Service Protected File Share %q (Resource Group %q): %+v", protectedItemName, resourceGroup, err2) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_backup_protected_file_share", *existing.ID) + } + } + + item := backup.ProtectedItemResource{ + Properties: &backup.AzureFileshareProtectedItem{ + PolicyID: &policyID, + ProtectedItemType: backup.ProtectedItemTypeAzureFileShareProtectedItem, + WorkloadType: backup.DataSourceTypeAzureFileShare, + SourceResourceID: utils.String(storageAccountID), + FriendlyName: utils.String(fileShareName), + }, + } + + resp, 
err := client.CreateOrUpdate(ctx, vaultName, resourceGroup, "Azure", containerName, protectedItemName, item) + if err != nil { + return fmt.Errorf("Error creating/updating Recovery Service Protected File Share %q (Resource Group %q): %+v", protectedItemName, resourceGroup, err) + } + + locationURL, err := resp.Response.Location() + if err != nil || locationURL == nil { + return fmt.Errorf("Error creating/updating Azure File Share backup item %q (Vault %q): Location header missing or empty", containerName, vaultName) + } + + opResourceID := azure.HandleAzureSdkForGoBug2824(locationURL.Path) + + parsedLocation, err := azure.ParseAzureResourceID(opResourceID) + if err != nil { + return err + } + operationID := parsedLocation.Path["operationResults"] + + if _, err := resourceArmBackupProtectedFileShareWaitForOperation(ctx, opClient, vaultName, resourceGroup, operationID, d); err != nil { + return err + } + + resp, err = client.Get(ctx, vaultName, resourceGroup, "Azure", containerName, protectedItemName, "") + + if err != nil { + return fmt.Errorf("Error creating/updating Azure File Share backup item %q (Vault %q): %+v", protectedItemName, vaultName, err) + } + + id := strings.Replace(*resp.ID, "Subscriptions", "subscriptions", 1) // This code is a workaround for this bug https://github.com/Azure/azure-sdk-for-go/issues/2824 + d.SetId(id) + + return resourceArmBackupProtectedFileShareRead(d, meta) +} + +func resourceArmBackupProtectedFileShareRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).RecoveryServices.ProtectedItemsClient + ctx, cancel := timeouts.ForRead(meta.(*ArmClient).StopContext, d) + defer cancel() + + id, err := azure.ParseAzureResourceID(d.Id()) + if err != nil { + return err + } + + protectedItemName := id.Path["protectedItems"] + vaultName := id.Path["vaults"] + resourceGroup := id.ResourceGroup + containerName := id.Path["protectionContainers"] + + log.Printf("[DEBUG] Reading Recovery Service Protected File Share %q 
(resource group %q)", protectedItemName, resourceGroup) + + resp, err := client.Get(ctx, vaultName, resourceGroup, "Azure", containerName, protectedItemName, "") + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + + return fmt.Errorf("Error making Read request on Recovery Service Protected File Share %q (Vault %q Resource Group %q): %+v", protectedItemName, vaultName, resourceGroup, err) + } + + d.Set("resource_group_name", resourceGroup) + d.Set("recovery_vault_name", vaultName) + + if properties := resp.Properties; properties != nil { + if item, ok := properties.AsAzureFileshareProtectedItem(); ok { + sourceResourceID := strings.Replace(*item.SourceResourceID, "Microsoft.storage", "Microsoft.Storage", 1) // The SDK is returning inconsistent capitalization + d.Set("source_storage_account_id", sourceResourceID) + d.Set("source_file_share_name", item.FriendlyName) + + if v := item.PolicyID; v != nil { + d.Set("backup_policy_id", strings.Replace(*v, "Subscriptions", "subscriptions", 1)) + } + } + } + + return nil +} + +func resourceArmBackupProtectedFileShareDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).RecoveryServices.ProtectedItemsClient + opClient := meta.(*ArmClient).RecoveryServices.BackupOperationStatusesClient + ctx, cancel := timeouts.ForDelete(meta.(*ArmClient).StopContext, d) + defer cancel() + + id, err := azure.ParseAzureResourceID(d.Id()) + if err != nil { + return err + } + + protectedItemName := id.Path["protectedItems"] + resourceGroup := id.ResourceGroup + vaultName := id.Path["vaults"] + containerName := id.Path["protectionContainers"] + + log.Printf("[DEBUG] Deleting Recovery Service Protected Item %q (resource group %q)", protectedItemName, resourceGroup) + + resp, err := client.Delete(ctx, vaultName, resourceGroup, "Azure", containerName, protectedItemName) + if err != nil { + if !utils.ResponseWasNotFound(resp) { + return fmt.Errorf("Error issuing delete 
request for Recovery Service Protected File Share %q (Resource Group %q): %+v", protectedItemName, resourceGroup, err) + } + } + + locationURL, err := resp.Response.Location() + if err != nil || locationURL == nil { + return fmt.Errorf("Error deleting Azure File Share backup item %s (Vault %s): Location header missing or empty", containerName, vaultName) + } + + opResourceID := azure.HandleAzureSdkForGoBug2824(locationURL.Path) + + parsedLocation, err := azure.ParseAzureResourceID(opResourceID) + if err != nil { + return err + } + operationID := parsedLocation.Path["backupOperationResults"] // This is different for create and delete requests ¯\_(ツ)_/¯ + + if _, err := resourceArmBackupProtectedFileShareWaitForOperation(ctx, opClient, vaultName, resourceGroup, operationID, d); err != nil { + return err + } + + return nil +} + +func resourceArmBackupProtectedFileShareWaitForOperation(ctx context.Context, client *backup.OperationStatusesClient, vaultName, resourceGroup, operationID string, d *schema.ResourceData) (backup.OperationStatus, error) { + state := &resource.StateChangeConf{ + MinTimeout: 10 * time.Second, + Delay: 10 * time.Second, + Pending: []string{"InProgress"}, + Target: []string{"Succeeded"}, + Refresh: resourceArmBackupProtectedFileShareCheckOperation(ctx, client, vaultName, resourceGroup, operationID), + } + + if features.SupportsCustomTimeouts() { + if d.IsNewResource() { + state.Timeout = d.Timeout(schema.TimeoutCreate) + } else { + state.Timeout = d.Timeout(schema.TimeoutUpdate) + } + } else { + state.Timeout = 30 * time.Minute + } + + log.Printf("[DEBUG] Waiting for backup operation %s (Vault %s) to complete", operationID, vaultName) + resp, err := state.WaitForState() + if err != nil { + return resp.(backup.OperationStatus), err + } + return resp.(backup.OperationStatus), nil +} + +func resourceArmBackupProtectedFileShareCheckOperation(ctx context.Context, client *backup.OperationStatusesClient, vaultName, resourceGroup, operationID string) 
resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := client.Get(ctx, vaultName, resourceGroup, operationID) + if err != nil { + return resp, "Error", fmt.Errorf("Error making Read request on Recovery Service Protection Container operation %q (Vault %q in Resource Group %q): %+v", operationID, vaultName, resourceGroup, err) + } + + if opErr := resp.Error; opErr != nil { + errMsg := "No upstream error message" + if opErr.Message != nil { + errMsg = *opErr.Message + } + err = fmt.Errorf("Azure Backup operation status failed with status %q (Vault %q Resource Group %q Operation ID %q): %+v", resp.Status, vaultName, resourceGroup, operationID, errMsg) + } + + log.Printf("[DEBUG] Backup operation %s status is %s", operationID, string(resp.Status)) + return resp, string(resp.Status), err + } +} diff --git a/azurerm/resource_arm_backup_protected_file_share_test.go b/azurerm/resource_arm_backup_protected_file_share_test.go new file mode 100644 index 000000000000..6068dd08db18 --- /dev/null +++ b/azurerm/resource_arm_backup_protected_file_share_test.go @@ -0,0 +1,315 @@ +package azurerm + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +// TODO: These tests fail because enabling backup on file shares with no content +func TestAccAzureRMBackupProtectedFileShare_basic(t *testing.T) { + resourceName := "azurerm_backup_protected_file_share.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + 
CheckDestroy: testCheckAzureRMBackupProtectedFileShareDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMBackupProtectedFileShare_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMBackupProtectedFileShareExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "resource_group_name"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { //vault cannot be deleted unless we unregister all backups + Config: testAccAzureRMBackupProtectedFileShare_base(ri, testLocation()), + Check: resource.ComposeTestCheckFunc(), + }, + }, + }) +} + +func TestAccAzureRMBackupProtectedFileShare_requiresImport(t *testing.T) { + if !features.ShouldResourcesBeImported() { + t.Skip("Skipping since resources aren't required to be imported") + return + } + + resourceName := "azurerm_backup_protected_file_share.test" + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMBackupProtectedFileShareDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMBackupProtectedFileShare_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMBackupProtectedFileShareExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "resource_group_name"), + ), + }, + { + Config: testAccAzureRMBackupProtectedFileShare_requiresImport(ri, testLocation()), + ExpectError: testRequiresImportError("azurerm_backup_protected_file_share"), + }, + { //vault cannot be deleted unless we unregister all backups + Config: testAccAzureRMBackupProtectedFileShare_base(ri, testLocation()), + Check: resource.ComposeTestCheckFunc(), + }, + }, + }) +} + +func TestAccAzureRMBackupProtectedFileShare_updateBackupPolicyId(t *testing.T) { + protectedFileShareResourceName := "azurerm_backup_protected_file_share.test" + fBackupPolicyResourceName := 
"azurerm_backup_policy_file_share.test1" + sBackupPolicyResourceName := "azurerm_backup_policy_file_share.test2" + + ri := tf.AccRandTimeInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMBackupProtectedFileShareDestroy, + Steps: []resource.TestStep{ + { // Create resources and link first backup policy id + Config: testAccAzureRMBackupProtectedFileShare_basic(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(protectedFileShareResourceName, "backup_policy_id", fBackupPolicyResourceName, "id"), + ), + }, + { // Modify backup policy id to the second one + // Set Destroy false to prevent error from cleaning up dangling resource + Config: testAccAzureRMBackupProtectedFileShare_updatePolicy(ri, testLocation()), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(protectedFileShareResourceName, "backup_policy_id", sBackupPolicyResourceName, "id"), + ), + }, + { // Remove protected items first before the associated policies are deleted + Config: testAccAzureRMBackupProtectedFileShare_base(ri, testLocation()), + Check: resource.ComposeTestCheckFunc(), + }, + }, + }) +} + +func testCheckAzureRMBackupProtectedFileShareDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_backup_protected_file_share" { + continue + } + + resourceGroup := rs.Primary.Attributes["resource_group_name"] + vaultName := rs.Primary.Attributes["recovery_vault_name"] + storageID := rs.Primary.Attributes["source_storage_account_id"] + fileShareName := rs.Primary.Attributes["source_file_share_name"] + + parsedStorageID, err := azure.ParseAzureResourceID(storageID) + if err != nil { + return fmt.Errorf("[ERROR] Unable to parse source_storage_account_id '%s': %+v", storageID, err) + } + accountName, hasName := parsedStorageID.Path["storageAccounts"] + if !hasName { + return 
fmt.Errorf("[ERROR] parsed source_storage_account_id '%s' doesn't contain 'storageAccounts'", storageID) + } + + protectedItemName := fmt.Sprintf("AzureFileShare;%s", fileShareName) + containerName := fmt.Sprintf("StorageContainer;storage;%s;%s", parsedStorageID.ResourceGroup, accountName) + + client := testAccProvider.Meta().(*ArmClient).RecoveryServices.ProtectedItemsClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + resp, err := client.Get(ctx, vaultName, resourceGroup, "Azure", containerName, protectedItemName, "") + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return nil + } + + return err + } + + return fmt.Errorf("Azure Backup Protected File Share still exists:\n%#v", resp) + } + + return nil +} + +func testCheckAzureRMBackupProtectedFileShareExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %q", resourceName) + } + + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for Azure Backup Protected File Share: %q", resourceName) + } + + vaultName := rs.Primary.Attributes["recovery_vault_name"] + storageID := rs.Primary.Attributes["source_storage_account_id"] + fileShareName := rs.Primary.Attributes["source_file_share_name"] + + parsedStorageID, err := azure.ParseAzureResourceID(storageID) + if err != nil { + return fmt.Errorf("[ERROR] Unable to parse source_storage_account_id '%s': %+v", storageID, err) + } + accountName, hasName := parsedStorageID.Path["storageAccounts"] + if !hasName { + return fmt.Errorf("[ERROR] parsed source_storage_account_id '%s' doesn't contain 'storageAccounts'", storageID) + } + + protectedItemName := fmt.Sprintf("AzureFileShare;%s", fileShareName) + containerName := 
fmt.Sprintf("StorageContainer;storage;%s;%s", parsedStorageID.ResourceGroup, accountName) + + client := testAccProvider.Meta().(*ArmClient).RecoveryServices.ProtectedItemsClient + ctx := testAccProvider.Meta().(*ArmClient).StopContext + + resp, err := client.Get(ctx, vaultName, resourceGroup, "Azure", containerName, protectedItemName, "") + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Azure Backup Protected File Share %q (resource group: %q) was not found: %+v", protectedItemName, resourceGroup, err) + } + + return fmt.Errorf("Bad: Get on recoveryServicesVaultsClient: %+v", err) + } + + return nil + } +} + +func testAccAzureRMBackupProtectedFileShare_base(rInt int, location string) string { + rstr := strconv.Itoa(rInt) + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-backup-%[1]d" + location = "%[2]s" +} + +resource "azurerm_storage_account" "test" { + name = "acctest%[3]s" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_storage_share" "test" { + name = "acctest-ss-%[1]d" + storage_account_name = "${azurerm_storage_account.test.name}" + metadata = {} + + lifecycle { + ignore_changes = [metadata] // Ignore changes Azure Backup makes to the metadata + } +} + +resource "azurerm_recovery_services_vault" "test" { + name = "acctest-VAULT-%[1]d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + sku = "Standard" +} + +resource "azurerm_backup_policy_file_share" "test1" { + name = "acctest-PFS-%[1]d" + resource_group_name = "${azurerm_resource_group.test.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.test.name}" + + backup { + frequency = "Daily" + time = "23:00" + } + + retention_daily { + count = 10 + } +} +`, rInt, location, rstr[len(rstr)-5:]) +} + +func 
testAccAzureRMBackupProtectedFileShare_basic(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_backup_container_storage_account" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.test.name}" + storage_account_id = "${azurerm_storage_account.test.id}" +} + +resource "azurerm_backup_protected_file_share" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.test.name}" + source_storage_account_id = "${azurerm_backup_container_storage_account.test.storage_account_id}" + source_file_share_name = "${azurerm_storage_share.test.name}" + backup_policy_id = "${azurerm_backup_policy_file_share.test1.id}" +} +`, testAccAzureRMBackupProtectedFileShare_base(rInt, location)) +} + +func testAccAzureRMBackupProtectedFileShare_updatePolicy(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_backup_policy_file_share" "test2" { + name = "acctest-%d-Secondary" + resource_group_name = "${azurerm_resource_group.test.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.test.name}" + + backup { + frequency = "Daily" + time = "23:00" + } + + retention_daily { + count = 10 + } +} + +resource "azurerm_backup_container_storage_account" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.test.name}" + storage_account_id = "${azurerm_storage_account.test.id}" +} + +resource "azurerm_backup_protected_file_share" "test" { + resource_group_name = "${azurerm_resource_group.test.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.test.name}" + source_storage_account_id = "${azurerm_backup_container_storage_account.test.storage_account_id}" + source_file_share_name = "${azurerm_storage_share.test.name}" + backup_policy_id = "${azurerm_backup_policy_file_share.test2.id}" +} +`, 
testAccAzureRMBackupProtectedFileShare_base(rInt, location), rInt) +} + +func testAccAzureRMBackupProtectedFileShare_requiresImport(rInt int, location string) string { + return fmt.Sprintf(` +%s + +resource "azurerm_backup_protected_file_share" "test_import" { + resource_group_name = "${azurerm_resource_group.test.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.test.name}" + source_storage_account_id = "${azurerm_storage_account.test.id}" + source_file_share_name = "${azurerm_storage_share.test.name}" + backup_policy_id = "${azurerm_backup_policy_file_share.test1.id}" +} +`, testAccAzureRMBackupProtectedFileShare_basic(rInt, location)) +} diff --git a/website/docs/r/backup_container_storage_account.html.markdown b/website/docs/r/backup_container_storage_account.html.markdown new file mode 100644 index 000000000000..ad80a5e10ef0 --- /dev/null +++ b/website/docs/r/backup_container_storage_account.html.markdown @@ -0,0 +1,72 @@ +--- +subcategory: "Recovery Services" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_backup_container_storage_account" +sidebar_current: "docs-azurerm-backup-container-storage-account" +description: |- + Manages a storage account container in an Azure Recovery Vault +--- + +# azurerm_backup_container_storage_account + +Manages registration of a storage account with Azure Backup. Storage accounts must be registered with an Azure Recovery Vault in order to backup file shares within the storage account. Registering a storage account with a vault creates what is known as a protection container within Azure Recovery Services. Once the container is created, Azure file shares within the storage account can be backed up using the `azurerm_backup_protected_file_share` resource. + +-> **NOTE:** Azure Backup for Azure File Shares is currently in public preview. During the preview, the service is subject to additional limitations and unsupported backup scenarios. 
[Read More](https://docs.microsoft.com/en-us/azure/backup/backup-azure-files#limitations-for-azure-file-share-backup-during-preview) + +## Example Usage + +```hcl +resource "azurerm_resource_group" "rg" { + name = "tfex-network-mapping-primary" + location = "West US" +} + +resource "azurerm_recovery_services_vault" "vault" { + name = "example-recovery-vault" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + sku = "Standard" +} + +resource "azurerm_storage_account" "sa" { + name = "examplesa" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_backup_container_storage_account" "container" { + resource_group_name = "${azurerm_resource_group.rg.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.vault.name}" + storage_account_id = "${azurerm_storage_account.sa.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `resource_group_name` - (Required) Name of the resource group where the vault is located. + +* `recovery_vault_name` - (Required) The name of the vault where the storage account will be registered. + +* `storage_account_id` - (Required) Azure Resource ID of the storage account to be registered + +-> **NOTE** Azure Backup places a Resource Lock on the storage account that will cause deletion to fail until the account is unregistered from Azure Backup + +## Attributes Reference + +In addition to the arguments above, the following attributes are exported: + +* `id` - The resource ID. + +## Import + +Azure Backup Storage Account Containers can be imported using the `resource id`, e.g. 
+ +```shell +terraform import azurerm_backup_container_storage_account.mycontainer "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resource-group-name/providers/Microsoft.RecoveryServices/vaults/recovery-vault-name/backupFabrics/Azure/protectionContainers/StorageContainer;storage;storage-rg-name;storage-account" +``` + +Note the ID requires quoting as there are semicolons diff --git a/website/docs/r/backup_policy_file_share.markdown b/website/docs/r/backup_policy_file_share.markdown new file mode 100644 index 000000000000..58ceb1236a94 --- /dev/null +++ b/website/docs/r/backup_policy_file_share.markdown @@ -0,0 +1,95 @@ +--- +subcategory: "Recovery Services" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_backup_policy_file_share" +sidebar_current: "docs-azurerm-backup-policy-file-share" +description: |- + Manages an Azure File Share Backup Policy. +--- + +# azurerm_backup_policy_file_share + +Manages an Azure File Share Backup Policy within a Recovery Services vault. + +-> **NOTE:** Azure Backup for Azure File Shares is currently in public preview. During the preview, the service is subject to additional limitations and unsupported backup scenarios. 
[Read More](https://docs.microsoft.com/en-us/azure/backup/backup-azure-files#limitations-for-azure-file-share-backup-during-preview) + +## Example Usage + +```hcl +resource "azurerm_resource_group" "rg" { + name = "tfex-recovery_vault" + location = "West US" +} + +resource "azurerm_recovery_services_vault" "vault" { + name = "tfex-recovery-vault" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + sku = "Standard" +} + +resource "azurerm_backup_policy_file_share" "policy" { + name = "tfex-recovery-vault-policy" + resource_group_name = "${azurerm_resource_group.rg.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.vault.name}" + + timezone = "UTC" + + backup { + frequency = "Daily" + time = "23:00" + } + + retention_daily { + count = 10 + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Specifies the name of the policy. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the resource group in which to create the policy. Changing this forces a new resource to be created. + +* `recovery_vault_name` - (Required) Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created. + +* `backup` - (Required) Configures the Policy backup frequency and times as documented in the `backup` block below. + +* `timezone` - (Optional) Specifies the timezone. Defaults to `UTC` + +* `retention_daily` - (Required) Configures the policy daily retention as documented in the `retention_daily` block below. + +-> **NOTE:** During the public preview, only daily retentions are supported. This argument is made available in this format for consistency with VM backup policies and to allow for potential future support of additional retention policies + +--- + +The `backup` block supports: + +* `frequency` - (Required) Sets the backup frequency. 
Currently, only `Daily` is supported + +-> **NOTE:** During the public preview, only daily backups are supported. This argument is made available for consistency with VM backup policies and to allow for potential future support of weekly backups + +* `times` - (Required) The time of day to perform the backup in 24-hour format. Times must be either on the hour or half hour (e.g. 12:00, 12:30, 13:00, etc.) + +--- + +The `retention_daily` block supports: + +* `count` - (Required) The number of daily backups to keep. Must be between `1` and `180` (inclusive) + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the Azure File Share Backup Policy. + +## Import + +Azure File Share Backup Policies can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_backup_policy_file_share.policy1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.RecoveryServices/vaults/example-recovery-vault/backupPolicies/policy1 +``` diff --git a/website/docs/r/backup_protected_file_share.markdown b/website/docs/r/backup_protected_file_share.markdown new file mode 100644 index 000000000000..138182eb6312 --- /dev/null +++ b/website/docs/r/backup_protected_file_share.markdown @@ -0,0 +1,102 @@ +--- +subcategory: "Recovery Services" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_backup_protected_file_share" +sidebar_current: "docs-azurerm-backup-protected-vm" +description: |- + Manages an Azure Backup Protected File Share. +--- + +# azurerm_backup_protected_file_share + +Manages an Azure Backup Protected File Share to enable backups for file shares within an Azure Storage Account + +-> **NOTE:** Azure Backup for Azure File Shares is currently in public preview. During the preview, the service is subject to additional limitations and unsupported backup scenarios. 
[Read More](https://docs.microsoft.com/en-us/azure/backup/backup-azure-files#limitations-for-azure-file-share-backup-during-preview) + +-> **NOTE** Azure Backup for Azure File Shares does not support Soft Delete at this time. Deleting this resource will also delete all associated backup data. Please exercise caution. Consider using [`prevent_destroy`](https://www.terraform.io/docs/configuration/resources.html#prevent_destroy) to guard against accidental deletion. + +## Example Usage + +```hcl +resource "azurerm_resource_group" "rg" { + name = "tfex-recovery_vault" + location = "West US" +} + +resource "azurerm_recovery_services_vault" "vault" { + name = "tfex-recovery-vault" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + sku = "Standard" +} + +resource "azurerm_storage_account" "sa" { + name = "examplesa" + location = "${azurerm_resource_group.rg.location}" + resource_group_name = "${azurerm_resource_group.rg.name}" + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_storage_share" "example" { + name = "example-share" + storage_account_name = "${azurerm_storage_account.sa.name}" +} + +resource "azurerm_backup_container_storage_account" "protection-container" { + resource_group_name = "${azurerm_resource_group.rg.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.vault.name}" + storage_account_id = "${azurerm_storage_account.sa.id}" +} + +resource "azurerm_backup_policy_file_share" "example" { + name = "tfex-recovery-vault-policy" + resource_group_name = "${azurerm_resource_group.rg.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.vault.name}" + + backup { + frequency = "Daily" + time = "23:00" + } +} + +resource "azurerm_backup_protected_file_share" "share1" { + resource_group_name = "${azurerm_resource_group.rg.name}" + recovery_vault_name = "${azurerm_recovery_services_vault.vault.name}" + source_storage_account_id = 
"${azurerm_backup_container_storage_account.protection-container.storage_account_id}" + source_file_share_name = "${azurerm_storage_share.example.name}" + backup_policy_id = "${azurerm_backup_policy_file_share.example.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `resource_group_name` - (Required) The name of the resource group in which to create the Azure Backup Protected File Share. Changing this forces a new resource to be created. + +* `recovery_vault_name` - (Required) Specifies the name of the Recovery Services Vault to use. Changing this forces a new resource to be created. + +* `source_storage_account_id` - (Required) Specifies the ID of the storage account of the file share to backup. Changing this forces a new resource to be created. + +-> **NOTE** The storage account must already be registered with the recovery vault in order to backup shares within the account. You can use the `azurerm_backup_container_storage_account` resource or the [Register-AzRecoveryServicesBackupContainer PowerShell cmdlet](https://docs.microsoft.com/en-us/powershell/module/az.recoveryservices/register-azrecoveryservicesbackupcontainer?view=azps-3.2.0) to register a storage account with a vault. + +* `source_file_share_name` - (Required) Specifies the name of the file share to backup. Changing this forces a new resource to be created. + +* `backup_policy_id` - (Required) Specifies the ID of the backup policy to use. The policy must be an Azure File Share backup policy. Other types are not supported. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the Azure Backup protected item. + +## Import + +Azure Backup Protected File Shares can be imported using the `resource id`, e.g. 
+ +```shell +terraform import azurerm_backup_protected_file_share.item1 "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.RecoveryServices/vaults/example-recovery-vault/backupFabrics/Azure/protectionContainers/StorageContainer;storage;group2;example-storage-account/protectedItems/AzureFileShare;example-share" +``` + +Note the ID requires quoting as there are semicolons