From 18801c32c4450d820812d352f86ad908c847412e Mon Sep 17 00:00:00 2001 From: Pulumi Bot <30351955+pulumi-bot@users.noreply.github.com> Date: Mon, 28 Aug 2023 15:22:16 -0700 Subject: [PATCH] Upgrade terraform-provider-databricks to v1.24.0 (#179) * make tfgen * make build_sdks --------- Co-authored-by: pulumi-bot --- .../bridge-metadata.json | 55 +- .../pulumi-resource-databricks/schema.json | 487 +++++++++++++--- provider/go.mod | 46 +- provider/go.sum | 88 +-- sdk/dotnet/AccessControlRuleSet.cs | 257 ++++++++- sdk/dotnet/Catalog.cs | 18 + sdk/dotnet/Connection.cs | 282 ++++++++++ sdk/dotnet/ExternalLocation.cs | 66 ++- sdk/dotnet/GetCurrentUser.cs | 5 + sdk/dotnet/GetGroup.cs | 19 + sdk/dotnet/GetServicePrincipal.cs | 19 + sdk/dotnet/GetUser.cs | 7 + sdk/dotnet/Grants.cs | 9 + .../AccessControlRuleSetGrantRuleArgs.cs | 3 +- .../AccessControlRuleSetGrantRuleGetArgs.cs | 3 +- .../ExternalLocationEncryptionDetailsArgs.cs | 23 + ...xternalLocationEncryptionDetailsGetArgs.cs | 23 + ...cryptionDetailsSseEncryptionDetailsArgs.cs | 26 + ...ptionDetailsSseEncryptionDetailsGetArgs.cs | 26 + .../GetJobJobSettingsSettingsRunJobTask.cs | 2 +- ...GetJobJobSettingsSettingsRunJobTaskArgs.cs | 2 +- ...GetJobJobSettingsSettingsTaskRunJobTask.cs | 2 +- ...obJobSettingsSettingsTaskRunJobTaskArgs.cs | 2 +- sdk/dotnet/Inputs/JobRunJobTaskArgs.cs | 2 +- sdk/dotnet/Inputs/JobRunJobTaskGetArgs.cs | 2 +- sdk/dotnet/Inputs/JobTaskRunJobTaskArgs.cs | 2 +- sdk/dotnet/Inputs/JobTaskRunJobTaskGetArgs.cs | 2 +- .../MetastoreDataAccessAwsIamRoleArgs.cs | 2 +- .../MetastoreDataAccessAwsIamRoleGetArgs.cs | 2 +- ...toreDataAccessAzureServicePrincipalArgs.cs | 2 - ...eDataAccessAzureServicePrincipalGetArgs.cs | 2 - ...taAccessDatabricksGcpServiceAccountArgs.cs | 2 + ...ccessDatabricksGcpServiceAccountGetArgs.cs | 2 + ...storeDataAccessGcpServiceAccountKeyArgs.cs | 2 + ...reDataAccessGcpServiceAccountKeyGetArgs.cs | 2 + sdk/dotnet/Inputs/MlflowModelTagArgs.cs | 8 +- sdk/dotnet/Inputs/MlflowModelTagGetArgs.cs | 8 +- .../Inputs/MlflowWebhookHttpUrlSpecArgs.cs | 14 +- .../Inputs/MlflowWebhookHttpUrlSpecGetArgs.cs | 14 +- sdk/dotnet/Inputs/MlflowWebhookJobSpecArgs.cs | 14 +- .../Inputs/MlflowWebhookJobSpecGetArgs.cs | 14 +- .../ModelServingConfigServedModelArgs.cs | 7 + .../ModelServingConfigServedModelGetArgs.cs | 7 + .../MwsCustomerManagedKeysGcpKeyInfoArgs.cs | 3 + ...MwsCustomerManagedKeysGcpKeyInfoGetArgs.cs | 3 + sdk/dotnet/Inputs/MwsWorkspacesTokenArgs.cs | 3 + .../Inputs/MwsWorkspacesTokenGetArgs.cs | 3 + sdk/dotnet/Inputs/SqlTableColumnArgs.cs | 6 +- sdk/dotnet/Inputs/SqlTableColumnGetArgs.cs | 6 +- sdk/dotnet/Metastore.cs | 30 +- sdk/dotnet/MetastoreAssignment.cs | 1 + sdk/dotnet/MlflowModel.cs | 15 +- sdk/dotnet/MwsCustomerManagedKeys.cs | 74 ++- sdk/dotnet/MwsWorkspaces.cs | 9 + .../Outputs/AccessControlRuleSetGrantRule.cs | 3 +- .../ExternalLocationEncryptionDetails.cs | 24 + ...onEncryptionDetailsSseEncryptionDetails.cs | 29 + ...tJobJobSettingsSettingsRunJobTaskResult.cs | 4 +- ...JobSettingsSettingsTaskRunJobTaskResult.cs | 4 +- sdk/dotnet/Outputs/JobRunJobTask.cs | 4 +- sdk/dotnet/Outputs/JobTaskRunJobTask.cs | 4 +- .../Outputs/MetastoreDataAccessAwsIamRole.cs | 2 +- ...etastoreDataAccessAzureServicePrincipal.cs | 2 - ...reDataAccessDatabricksGcpServiceAccount.cs | 2 + ...MetastoreDataAccessGcpServiceAccountKey.cs | 2 + sdk/dotnet/Outputs/MlflowModelTag.cs | 8 +- .../Outputs/ModelServingConfigServedModel.cs | 6 + .../MwsCustomerManagedKeysGcpKeyInfo.cs | 3 + sdk/dotnet/Outputs/MwsWorkspacesToken.cs | 3 + 
sdk/dotnet/Outputs/SqlTableColumn.cs | 6 +- sdk/dotnet/Share.cs | 18 + sdk/dotnet/SqlAlert.cs | 18 + sdk/dotnet/SqlDashboard.cs | 18 + sdk/dotnet/SqlQuery.cs | 18 + sdk/dotnet/StorageCredential.cs | 9 + sdk/go/databricks/accessControlRuleSet.go | 297 +++++++++- sdk/go/databricks/catalog.go | 15 + sdk/go/databricks/connection.go | 373 +++++++++++++ sdk/go/databricks/externalLocation.go | 83 ++- sdk/go/databricks/getCurrentUser.go | 8 +- sdk/go/databricks/getGroup.go | 11 + sdk/go/databricks/getServicePrincipal.go | 11 + sdk/go/databricks/getUser.go | 7 + sdk/go/databricks/grants.go | 9 + sdk/go/databricks/init.go | 7 + sdk/go/databricks/metastore.go | 68 ++- sdk/go/databricks/metastoreAssignment.go | 1 + sdk/go/databricks/mlflowModel.go | 37 +- sdk/go/databricks/mwsCustomerManagedKeys.go | 109 +++- sdk/go/databricks/mwsWorkspaces.go | 16 +- sdk/go/databricks/pulumiTypes.go | 446 ++++++++++++--- sdk/go/databricks/share.go | 15 + sdk/go/databricks/sqlAlert.go | 28 +- sdk/go/databricks/sqlDashboard.go | 48 +- sdk/go/databricks/sqlQuery.go | 38 +- sdk/go/databricks/storageCredential.go | 9 + .../databricks/AccessControlRuleSet.java | 295 +++++++++- .../databricks/AccessControlRuleSetArgs.java | 8 + .../java/com/pulumi/databricks/Catalog.java | 14 + .../com/pulumi/databricks/CatalogArgs.java | 37 ++ .../com/pulumi/databricks/Connection.java | 226 ++++++++ .../com/pulumi/databricks/ConnectionArgs.java | 307 +++++++++++ .../databricks/DatabricksFunctions.java | 6 + .../pulumi/databricks/ExternalLocation.java | 51 +- .../databricks/ExternalLocationArgs.java | 128 ++++- .../java/com/pulumi/databricks/Grants.java | 6 + .../com/pulumi/databricks/GrantsArgs.java | 17 + .../java/com/pulumi/databricks/Metastore.java | 23 +- .../com/pulumi/databricks/MetastoreArgs.java | 54 ++ .../databricks/MetastoreAssignment.java | 1 + .../com/pulumi/databricks/MlflowModel.java | 24 +- .../pulumi/databricks/MlflowModelArgs.java | 17 - .../databricks/MwsCustomerManagedKeys.java | 89 ++- .../MwsCustomerManagedKeysArgs.java | 28 +- .../com/pulumi/databricks/MwsWorkspaces.java | 8 + .../pulumi/databricks/MwsWorkspacesArgs.java | 20 + .../java/com/pulumi/databricks/Share.java | 14 + .../java/com/pulumi/databricks/ShareArgs.java | 37 ++ .../java/com/pulumi/databricks/SqlAlert.java | 12 + .../com/pulumi/databricks/SqlAlertArgs.java | 34 ++ .../com/pulumi/databricks/SqlDashboard.java | 12 + .../pulumi/databricks/SqlDashboardArgs.java | 34 ++ .../java/com/pulumi/databricks/SqlQuery.java | 12 + .../com/pulumi/databricks/SqlQueryArgs.java | 34 ++ .../pulumi/databricks/StorageCredential.java | 6 + .../databricks/StorageCredentialArgs.java | 17 + .../AccessControlRuleSetGrantRuleArgs.java | 12 +- .../inputs/AccessControlRuleSetState.java | 8 + .../databricks/inputs/CatalogState.java | 37 ++ .../databricks/inputs/ConnectionState.java | 305 +++++++++++ ...ExternalLocationEncryptionDetailsArgs.java | 63 +++ ...yptionDetailsSseEncryptionDetailsArgs.java | 80 +++ .../inputs/ExternalLocationState.java | 128 ++++- .../databricks/inputs/GetGroupArgs.java | 37 ++ .../databricks/inputs/GetGroupPlainArgs.java | 27 + .../GetJobJobSettingsSettingsRunJobTask.java | 7 +- ...tJobJobSettingsSettingsRunJobTaskArgs.java | 9 +- ...tJobJobSettingsSettingsTaskRunJobTask.java | 7 +- ...JobSettingsSettingsTaskRunJobTaskArgs.java | 9 +- .../inputs/GetServicePrincipalArgs.java | 37 ++ .../inputs/GetServicePrincipalPlainArgs.java | 27 + .../pulumi/databricks/inputs/GrantsState.java | 17 + .../databricks/inputs/JobRunJobTaskArgs.java | 9 +- 
.../inputs/JobTaskRunJobTaskArgs.java | 9 +- .../MetastoreDataAccessAwsIamRoleArgs.java | 8 +- ...reDataAccessAzureServicePrincipalArgs.java | 8 - ...AccessDatabricksGcpServiceAccountArgs.java | 8 + ...oreDataAccessGcpServiceAccountKeyArgs.java | 8 + .../databricks/inputs/MetastoreState.java | 54 ++ .../databricks/inputs/MlflowModelState.java | 17 - .../databricks/inputs/MlflowModelTagArgs.java | 24 +- .../ModelServingConfigServedModelArgs.java | 40 ++ .../MwsCustomerManagedKeysGcpKeyInfoArgs.java | 20 + .../inputs/MwsCustomerManagedKeysState.java | 28 +- .../databricks/inputs/MwsWorkspacesState.java | 20 + .../inputs/MwsWorkspacesTokenArgs.java | 20 + .../pulumi/databricks/inputs/ShareState.java | 37 ++ .../databricks/inputs/SqlAlertState.java | 34 ++ .../databricks/inputs/SqlDashboardState.java | 34 ++ .../databricks/inputs/SqlQueryState.java | 34 ++ .../databricks/inputs/SqlTableColumnArgs.java | 19 +- .../inputs/StorageCredentialState.java | 17 + .../AccessControlRuleSetGrantRule.java | 6 +- .../ExternalLocationEncryptionDetails.java | 48 ++ ...EncryptionDetailsSseEncryptionDetails.java | 60 ++ .../outputs/GetCurrentUserResult.java | 12 + .../databricks/outputs/GetGroupResult.java | 20 + .../GetJobJobSettingsSettingsRunJobTask.java | 9 +- ...tJobJobSettingsSettingsTaskRunJobTask.java | 9 +- .../outputs/GetServicePrincipalResult.java | 20 + .../databricks/outputs/GetUserResult.java | 20 + .../databricks/outputs/JobRunJobTask.java | 9 +- .../databricks/outputs/JobTaskRunJobTask.java | 9 +- .../MetastoreDataAccessAwsIamRole.java | 4 +- ...astoreDataAccessAzureServicePrincipal.java | 4 - ...DataAccessDatabricksGcpServiceAccount.java | 4 + ...tastoreDataAccessGcpServiceAccountKey.java | 4 + .../databricks/outputs/MlflowModelTag.java | 26 +- .../ModelServingConfigServedModel.java | 16 + .../MwsCustomerManagedKeysGcpKeyInfo.java | 8 + .../outputs/MwsWorkspacesToken.java | 8 + .../databricks/outputs/SqlTableColumn.java | 16 +- sdk/nodejs/accessControlRuleSet.ts | 126 ++++- sdk/nodejs/catalog.ts | 14 + sdk/nodejs/connection.ts | 206 +++++++ sdk/nodejs/externalLocation.ts | 56 +- sdk/nodejs/getCurrentUser.ts | 2 + sdk/nodejs/getGroup.ts | 13 + sdk/nodejs/getServicePrincipal.ts | 13 + sdk/nodejs/getUser.ts | 4 + sdk/nodejs/grants.ts | 5 + sdk/nodejs/index.ts | 8 + sdk/nodejs/metastore.ts | 22 +- sdk/nodejs/metastoreAssignment.ts | 1 + sdk/nodejs/mlflowModel.ts | 11 +- sdk/nodejs/mwsCustomerManagedKeys.ts | 52 +- sdk/nodejs/mwsWorkspaces.ts | 9 + sdk/nodejs/share.ts | 14 + sdk/nodejs/sqlAlert.ts | 10 + sdk/nodejs/sqlDashboard.ts | 10 + sdk/nodejs/sqlQuery.ts | 10 + sdk/nodejs/storageCredential.ts | 5 + sdk/nodejs/tsconfig.json | 1 + sdk/nodejs/types/input.ts | 52 +- sdk/nodejs/types/output.ts | 48 +- sdk/python/pulumi_databricks/__init__.py | 9 + sdk/python/pulumi_databricks/_inputs.py | 169 ++++-- .../access_control_rule_set.py | 232 +++++++- sdk/python/pulumi_databricks/catalog.py | 47 ++ sdk/python/pulumi_databricks/connection.py | 518 ++++++++++++++++++ .../pulumi_databricks/external_location.py | 171 +++++- .../pulumi_databricks/get_current_user.py | 13 +- sdk/python/pulumi_databricks/get_group.py | 24 +- .../get_service_principal.py | 24 +- sdk/python/pulumi_databricks/get_user.py | 15 +- sdk/python/pulumi_databricks/grants.py | 34 ++ sdk/python/pulumi_databricks/metastore.py | 87 ++- .../pulumi_databricks/metastore_assignment.py | 2 + sdk/python/pulumi_databricks/mlflow_model.py | 40 +- .../mws_customer_managed_keys.py | 97 +++- .../pulumi_databricks/mws_workspaces.py | 13 + 
sdk/python/pulumi_databricks/outputs.py | 171 ++++-- sdk/python/pulumi_databricks/share.py | 53 +- sdk/python/pulumi_databricks/sql_alert.py | 74 ++- sdk/python/pulumi_databricks/sql_dashboard.py | 74 ++- sdk/python/pulumi_databricks/sql_query.py | 74 ++- .../pulumi_databricks/storage_credential.py | 34 ++ 227 files changed, 8512 insertions(+), 852 deletions(-) create mode 100644 sdk/dotnet/Connection.cs create mode 100644 sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsArgs.cs create mode 100644 sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsGetArgs.cs create mode 100644 sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs.cs create mode 100644 sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsGetArgs.cs create mode 100644 sdk/dotnet/Outputs/ExternalLocationEncryptionDetails.cs create mode 100644 sdk/dotnet/Outputs/ExternalLocationEncryptionDetailsSseEncryptionDetails.cs create mode 100644 sdk/go/databricks/connection.go create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/Connection.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/ConnectionArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/ConnectionState.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationEncryptionDetailsArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/ExternalLocationEncryptionDetails.java create mode 100644 sdk/java/src/main/java/com/pulumi/databricks/outputs/ExternalLocationEncryptionDetailsSseEncryptionDetails.java create mode 100644 sdk/nodejs/connection.ts create mode 100644 sdk/python/pulumi_databricks/connection.py diff --git a/provider/cmd/pulumi-resource-databricks/bridge-metadata.json b/provider/cmd/pulumi-resource-databricks/bridge-metadata.json index 163fc083..ba26cc63 100644 --- a/provider/cmd/pulumi-resource-databricks/bridge-metadata.json +++ b/provider/cmd/pulumi-resource-databricks/bridge-metadata.json @@ -141,6 +141,10 @@ "current": "databricks:index/clusterPolicy:ClusterPolicy", "majorVersion": 1 }, + "databricks_connection": { + "current": "databricks:index/connection:Connection", + "majorVersion": 1 + }, "databricks_dbfs_file": { "current": "databricks:index/dbfsFile:DbfsFile", "majorVersion": 1 @@ -155,7 +159,19 @@ }, "databricks_external_location": { "current": "databricks:index/externalLocation:ExternalLocation", - "majorVersion": 1 + "majorVersion": 1, + "fields": { + "encryption_details": { + "maxItemsOne": true, + "elem": { + "fields": { + "sse_encryption_details": { + "maxItemsOne": true + } + } + } + } + } }, "databricks_git_credential": { "current": "databricks:index/gitCredential:GitCredential", @@ -2688,6 +2704,7 @@ "databricks:index/catalogWorkspaceBinding:CatalogWorkspaceBinding": "databricks_catalog_workspace_binding", "databricks:index/cluster:Cluster": "databricks_cluster", "databricks:index/clusterPolicy:ClusterPolicy": "databricks_cluster_policy", + "databricks:index/connection:Connection": "databricks_connection", "databricks:index/dbfsFile:DbfsFile": "databricks_dbfs_file", "databricks:index/directory:Directory": "databricks_directory", "databricks:index/entitlements:Entitlements": "databricks_entitlements", @@ -2842,6 +2859,12 @@ "encryptionType": "encryption_type", "kmsKey": "kms_key" }, + 
"databricks:index/ExternalLocationEncryptionDetails:ExternalLocationEncryptionDetails": { + "sseEncryptionDetails": "sse_encryption_details" + }, + "databricks:index/ExternalLocationEncryptionDetailsSseEncryptionDetails:ExternalLocationEncryptionDetailsSseEncryptionDetails": { + "awsKmsKeyArn": "aws_kms_key_arn" + }, "databricks:index/InstancePoolAwsAttributes:InstancePoolAwsAttributes": { "spotBidPricePercent": "spot_bid_price_percent", "zoneId": "zone_id" @@ -3584,6 +3607,7 @@ "grantRules": "grant_rules" }, "databricks:index/catalog:Catalog": { + "connectionName": "connection_name", "forceDestroy": "force_destroy", "isolationMode": "isolation_mode", "metastoreId": "metastore_id", @@ -3635,6 +3659,11 @@ "policyFamilyId": "policy_family_id", "policyId": "policy_id" }, + "databricks:index/connection:Connection": { + "connectionType": "connection_type", + "metastoreId": "metastore_id", + "readOnly": "read_only" + }, "databricks:index/dbfsFile:DbfsFile": { "contentBase64": "content_base64", "dbfsPath": "dbfs_path", @@ -3654,8 +3683,11 @@ "workspaceAccess": "workspace_access" }, "databricks:index/externalLocation:ExternalLocation": { + "accessPoint": "access_point", "credentialName": "credential_name", + "encryptionDetails": "encryption_details", "forceDestroy": "force_destroy", + "forceUpdate": "force_update", "metastoreId": "metastore_id", "readOnly": "read_only", "skipValidation": "skip_validation" @@ -3796,6 +3828,7 @@ "clusterNameContains": "cluster_name_contains" }, "databricks:index/getCurrentUser:getCurrentUser": { + "aclPrincipalId": "acl_principal_id", "externalId": "external_id", "userName": "user_name", "workspaceUrl": "workspace_url" @@ -3814,6 +3847,7 @@ "objectId": "object_id" }, "databricks:index/getGroup:getGroup": { + "aclPrincipalId": "acl_principal_id", "allowClusterCreate": "allow_cluster_create", "allowInstancePoolCreate": "allow_instance_pool_create", "childGroups": "child_groups", @@ -4388,6 +4422,7 @@ "catalogName": "catalog_name" }, "databricks:index/getServicePrincipal:getServicePrincipal": { + "aclPrincipalId": "acl_principal_id", "applicationId": "application_id", "displayName": "display_name", "externalId": "external_id", @@ -4447,6 +4482,7 @@ "schemaName": "schema_name" }, "databricks:index/getUser:getUser": { + "aclPrincipalId": "acl_principal_id", "applicationId": "application_id", "displayName": "display_name", "externalId": "external_id", @@ -4470,6 +4506,7 @@ }, "databricks:index/grants:Grants": { "externalLocation": "external_location", + "foreignConnection": "foreign_connection", "grants": "grant", "materializedView": "materialized_view", "storageCredential": "storage_credential" @@ -4562,7 +4599,9 @@ "deltaSharingScope": "delta_sharing_scope", "forceDestroy": "force_destroy", "globalMetastoreId": "global_metastore_id", + "metastoreId": "metastore_id", "storageRoot": "storage_root", + "storageRootCredentialId": "storage_root_credential_id", "updatedAt": "updated_at", "updatedBy": "updated_by" }, @@ -4595,7 +4634,6 @@ "databricks:index/mlflowModel:MlflowModel": { "creationTimestamp": "creation_timestamp", "lastUpdatedTimestamp": "last_updated_timestamp", - "registeredModelId": "registered_model_id", "userId": "user_id" }, "databricks:index/mlflowWebhook:MlflowWebhook": { @@ -4802,7 +4840,13 @@ "objects": "object" }, "databricks:index/sqlAlert:SqlAlert": { - "queryId": "query_id" + "createdAt": "created_at", + "queryId": "query_id", + "updatedAt": "updated_at" + }, + "databricks:index/sqlDashboard:SqlDashboard": { + "createdAt": "created_at", + "updatedAt": 
"updated_at" }, "databricks:index/sqlEndpoint:SqlEndpoint": { "autoStopMins": "auto_stop_mins", @@ -4834,9 +4878,11 @@ "privilegeAssignments": "privilege_assignments" }, "databricks:index/sqlQuery:SqlQuery": { + "createdAt": "created_at", "dataSourceId": "data_source_id", "parameters": "parameter", - "runAsRole": "run_as_role" + "runAsRole": "run_as_role", + "updatedAt": "updated_at" }, "databricks:index/sqlTable:SqlTable": { "catalogName": "catalog_name", @@ -4865,6 +4911,7 @@ "azureManagedIdentity": "azure_managed_identity", "azureServicePrincipal": "azure_service_principal", "databricksGcpServiceAccount": "databricks_gcp_service_account", + "forceDestroy": "force_destroy", "gcpServiceAccountKey": "gcp_service_account_key", "metastoreId": "metastore_id", "readOnly": "read_only" diff --git a/provider/cmd/pulumi-resource-databricks/schema.json b/provider/cmd/pulumi-resource-databricks/schema.json index e205f559..7b546073 100644 --- a/provider/cmd/pulumi-resource-databricks/schema.json +++ b/provider/cmd/pulumi-resource-databricks/schema.json @@ -158,7 +158,7 @@ }, "role": { "type": "string", - "description": "Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles).\n* `roles/servicePrincipal.manager` - Manager of a service principal.\n* `roles/servicePrincipal.user` - User of a service principal.\n" + "description": "Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page).\n* `roles/servicePrincipal.manager` - Manager of a service principal.\n* `roles/servicePrincipal.user` - User of a service principal.\n* `roles/group.manager` - Manager of a group.\n" } }, "type": "object", @@ -545,6 +545,25 @@ }, "type": "object" }, + "databricks:index/ExternalLocationEncryptionDetails:ExternalLocationEncryptionDetails": { + "properties": { + "sseEncryptionDetails": { + "$ref": "#/types/databricks:index/ExternalLocationEncryptionDetailsSseEncryptionDetails:ExternalLocationEncryptionDetailsSseEncryptionDetails" + } + }, + "type": "object" + }, + "databricks:index/ExternalLocationEncryptionDetailsSseEncryptionDetails:ExternalLocationEncryptionDetailsSseEncryptionDetails": { + "properties": { + "algorithm": { + "type": "string" + }, + "awsKmsKeyArn": { + "type": "string" + } + }, + "type": "object" + }, "databricks:index/GrantsGrant:GrantsGrant": { "properties": { "principal": { @@ -2015,7 +2034,7 @@ "databricks:index/JobRunJobTask:JobRunJobTask": { "properties": { "jobId": { - "type": "string", + "type": "integer", "description": "(String) ID of the job\n" }, "jobParameters": { @@ -2957,7 +2976,7 @@ "databricks:index/JobTaskRunJobTask:JobTaskRunJobTask": { "properties": { "jobId": { - "type": "string", + "type": "integer", "description": "(String) ID of the job\n" }, "jobParameters": { @@ -3340,7 +3359,7 @@ "properties": { "roleArn": { "type": "string", - "description": "The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF`\n\n`azure_service_principal` optional configuration block for 
credential details for Azure:\n", + "description": "The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF`\n\n`azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended):\n", "willReplaceOnChanges": true } }, @@ -3371,7 +3390,7 @@ }, "clientSecret": { "type": "string", - "description": "The client secret generated for the above app ID in AAD. **This field is redacted on output**\n\n`azure_managed_identity` optional configuration block for using managed identity as credential details for Azure:\n", + "description": "The client secret generated for the above app ID in AAD. **This field is redacted on output**\n", "secret": true, "willReplaceOnChanges": true }, @@ -3392,7 +3411,7 @@ "properties": { "email": { "type": "string", - "description": "The email of the GCP service account created, to be granted access to relevant buckets.\n" + "description": "The email of the GCP service account created, to be granted access to relevant buckets.\n\n`azure_service_principal` optional configuration block for credential details for Azure (Legacy):\n" } }, "type": "object", @@ -3408,7 +3427,7 @@ "properties": { "email": { "type": "string", - "description": "The email of the GCP service account created, to be granted access to relevant buckets.\n", + "description": "The email of the GCP service account created, to be granted access to relevant buckets.\n\n`azure_service_principal` optional configuration block for credential details for Azure (Legacy):\n", "willReplaceOnChanges": true }, "privateKey": { @@ -3437,18 +3456,13 @@ "type": "string" } }, - "type": "object", - "required": [ - "key", - "value" - ] + "type": "object" }, "databricks:index/MlflowWebhookHttpUrlSpec:MlflowWebhookHttpUrlSpec": { "properties": { "authorization": { "type": "string", - "description": "Value of the authorization header that should be sent in the request sent by the wehbook. It should be of the form `\u003cauth type\u003e \u003ccredentials\u003e`, e.g. `Bearer \u003caccess_token\u003e`. If set to an empty string, no authorization header will be included in the request.\n", - "secret": true + "description": "Value of the authorization header that should be sent in the request sent by the wehbook. It should be of the form `\u003cauth type\u003e \u003ccredentials\u003e`, e.g. `Bearer \u003caccess_token\u003e`. If set to an empty string, no authorization header will be included in the request.\n" }, "enableSslVerification": { "type": "boolean", @@ -3472,8 +3486,7 @@ "properties": { "accessToken": { "type": "string", - "description": "The personal access token used to authorize webhook's job runs.\n", - "secret": true + "description": "The personal access token used to authorize webhook's job runs.\n" }, "jobId": { "type": "string", @@ -3523,10 +3536,12 @@ "type": "object", "additionalProperties": { "$ref": "pulumi.json#/Any" - } + }, + "description": "a map of environment variable name/values that will be used for serving this model. 
Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`.\n" }, "instanceProfileArn": { - "type": "string" + "type": "string", + "description": "ARN of the instance profile that the served model will use to access AWS resources.\n" }, "modelName": { "type": "string", @@ -3813,6 +3828,7 @@ "properties": { "kmsKeyId": { "type": "string", + "description": "The GCP KMS key's resource name.\n", "willReplaceOnChanges": true } }, @@ -4030,7 +4046,8 @@ "type": "string" }, "lifetimeSeconds": { - "type": "integer" + "type": "integer", + "description": "Token expiry lifetime. By default its 2592000 (30 days).\n" }, "tokenId": { "type": "string" @@ -5135,13 +5152,12 @@ }, "type": { "type": "string", - "description": "Column type spec (with metadata) as SQL text\n" + "description": "Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type.\n" } }, "type": "object", "required": [ - "name", - "type" + "name" ] }, "databricks:index/SqlWidgetParameter:SqlWidgetParameter": { @@ -7525,7 +7541,7 @@ "databricks:index/getJobJobSettingsSettingsRunJobTask:getJobJobSettingsSettingsRunJobTask": { "properties": { "jobId": { - "type": "string" + "type": "integer" }, "jobParameters": { "type": "object", @@ -8413,7 +8429,7 @@ "databricks:index/getJobJobSettingsSettingsTaskRunJobTask:getJobJobSettingsSettingsTaskRunJobTask": { "properties": { "jobId": { - "type": "string" + "type": "integer" }, "jobParameters": { "type": "object", @@ -9137,7 +9153,7 @@ }, "resources": { "databricks:index/accessControlRuleSet:AccessControlRuleSet": { - "description": "This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace.\n\n\u003e **Note** Currently, we only support managing access rules on service principal resources through `databricks.AccessControlRuleSet`.\n\n\u003e **Warning** `databricks.AccessControlRuleSet` cannot be used to manage access rules for resources supported by databricks_permissions. Refer to its documentation for more information.\n\n## Related Resources\n\nThe following resources are often used in the same context:\n\n* databricks.Group\n* databricks.User\n* databricks.ServicePrincipal\n", + "description": "This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace.\n\n\u003e **Note** Currently, we only support managing access rules on service principal, group and account resources through `databricks.AccessControlRuleSet`.\n\n\u003e **Warning** `databricks.AccessControlRuleSet` cannot be used to manage access rules for resources supported by databricks_permissions. 
Refer to its documentation for more information.\n\n## Service principal rule set usage\n\nThrough a Databricks workspace:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\nconst ds = databricks.getGroup({\n displayName: \"Data Science\",\n});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", {displayName: \"SP_FOR_AUTOMATION\"});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.then(ds =\u003e ds.aclPrincipalId)],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\nds = databricks.get_group(display_name=\"Data Science\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\", display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = Databricks.GetGroup.Invoke(new()\n {\n DisplayName = \"Data Science\",\n });\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.Apply(getGroupResult =\u003e getGroupResult.AclPrincipalId),\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.LookupGroup(ctx, \u0026databricks.LookupGroupArgs{\n\t\t\tDisplayName: \"Data Science\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tDisplayName: pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(ds.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetGroupArgs;\nimport com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport 
com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder()\n .displayName(\"Data Science\")\n .build());\n\n var automationSp = new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.applyValue(getGroupResult -\u003e getGroupResult.aclPrincipalId()))\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n automationSp:\n type: databricks:ServicePrincipal\n properties:\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n ds:\n fn::invoke:\n Function: databricks:getGroup\n Arguments:\n displayName: Data Science\n```\n\nThrough AWS Databricks account:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\n// account level group creation\nconst ds = new databricks.Group(\"ds\", {});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", {displayName: \"SP_FOR_AUTOMATION\"});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.aclPrincipalId],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\n# account level group creation\nds = databricks.Group(\"ds\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\", display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n // account level group creation\n var ds = new Databricks.Group(\"ds\");\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.AclPrincipalId,\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.NewGroup(ctx, \"ds\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tDisplayName: pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\tds.AclPrincipalId,\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Group;\nimport com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = new Group(\"ds\");\n\n var automationSp = new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.aclPrincipalId())\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # account level group creation\n ds:\n type: databricks:Group\n automationSp:\n type: databricks:ServicePrincipal\n properties:\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n```\n\nThrough Azure Databricks account:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\n// account level group creation\nconst ds = new databricks.Group(\"ds\", {});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", {\n applicationId: \"00000000-0000-0000-0000-000000000000\",\n displayName: \"SP_FOR_AUTOMATION\",\n});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.aclPrincipalId],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\n# account level 
group creation\nds = databricks.Group(\"ds\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\",\n application_id=\"00000000-0000-0000-0000-000000000000\",\n display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n // account level group creation\n var ds = new Databricks.Group(\"ds\");\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n ApplicationId = \"00000000-0000-0000-0000-000000000000\",\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.AclPrincipalId,\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.NewGroup(ctx, \"ds\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tApplicationId: pulumi.String(\"00000000-0000-0000-0000-000000000000\"),\n\t\t\tDisplayName: pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\tds.AclPrincipalId,\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Group;\nimport com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = new Group(\"ds\");\n\n var automationSp = new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .applicationId(\"00000000-0000-0000-0000-000000000000\")\n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", 
AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.aclPrincipalId())\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # account level group creation\n ds:\n type: databricks:Group\n automationSp:\n type: databricks:ServicePrincipal\n properties:\n applicationId: 00000000-0000-0000-0000-000000000000\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n```\n\nThrough GCP Databricks account:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\n// account level group creation\nconst ds = new databricks.Group(\"ds\", {});\nconst automationSp = new databricks.ServicePrincipal(\"automationSp\", {displayName: \"SP_FOR_AUTOMATION\"});\nconst automationSpRuleSet = new databricks.AccessControlRuleSet(\"automationSpRuleSet\", {grantRules: [{\n principals: [ds.aclPrincipalId],\n role: \"roles/servicePrincipal.user\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\n# account level group creation\nds = databricks.Group(\"ds\")\nautomation_sp = databricks.ServicePrincipal(\"automationSp\", display_name=\"SP_FOR_AUTOMATION\")\nautomation_sp_rule_set = databricks.AccessControlRuleSet(\"automationSpRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[ds.acl_principal_id],\n role=\"roles/servicePrincipal.user\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n // account level group creation\n var ds = new Databricks.Group(\"ds\");\n\n var automationSp = new Databricks.ServicePrincipal(\"automationSp\", new()\n {\n DisplayName = \"SP_FOR_AUTOMATION\",\n });\n\n var automationSpRuleSet = new Databricks.AccessControlRuleSet(\"automationSpRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n ds.AclPrincipalId,\n },\n Role = \"roles/servicePrincipal.user\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\tds, err := databricks.NewGroup(ctx, \"ds\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewServicePrincipal(ctx, \"automationSp\", \u0026databricks.ServicePrincipalArgs{\n\t\t\tDisplayName: pulumi.String(\"SP_FOR_AUTOMATION\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"automationSpRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\tds.AclPrincipalId,\n\t\t\t\t\t},\n\t\t\t\t\tRole: 
pulumi.String(\"roles/servicePrincipal.user\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Group;\nimport com.pulumi.databricks.ServicePrincipal;\nimport com.pulumi.databricks.ServicePrincipalArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = new Group(\"ds\");\n\n var automationSp = new ServicePrincipal(\"automationSp\", ServicePrincipalArgs.builder() \n .displayName(\"SP_FOR_AUTOMATION\")\n .build());\n\n var automationSpRuleSet = new AccessControlRuleSet(\"automationSpRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(ds.aclPrincipalId())\n .role(\"roles/servicePrincipal.user\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # account level group creation\n ds:\n type: databricks:Group\n automationSp:\n type: databricks:ServicePrincipal\n properties:\n displayName: SP_FOR_AUTOMATION\n automationSpRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${ds.aclPrincipalId}\n role: roles/servicePrincipal.user\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n```\n\n## Group rule set usage\n\nRefer to the appropriate provider configuration as shown in the examples for service principal rule set.\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\nconst ds = databricks.getGroup({\n displayName: \"Data Science\",\n});\nconst john = databricks.getUser({\n userName: \"john.doe@example.com\",\n});\nconst dsGroupRuleSet = new databricks.AccessControlRuleSet(\"dsGroupRuleSet\", {grantRules: [{\n principals: [john.then(john =\u003e john.aclPrincipalId)],\n role: \"roles/group.manager\",\n}]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\nds = databricks.get_group(display_name=\"Data Science\")\njohn = databricks.get_user(user_name=\"john.doe@example.com\")\nds_group_rule_set = databricks.AccessControlRuleSet(\"dsGroupRuleSet\", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[john.acl_principal_id],\n role=\"roles/group.manager\",\n)])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = Databricks.GetGroup.Invoke(new()\n {\n DisplayName = \"Data Science\",\n });\n\n var john = Databricks.GetUser.Invoke(new()\n {\n UserName = \"john.doe@example.com\",\n });\n\n var dsGroupRuleSet = new Databricks.AccessControlRuleSet(\"dsGroupRuleSet\", new()\n {\n GrantRules = new[]\n {\n new 
Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n john.Apply(getUserResult =\u003e getUserResult.AclPrincipalId),\n },\n Role = \"roles/group.manager\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\t_, err := databricks.LookupGroup(ctx, \u0026databricks.LookupGroupArgs{\n\t\t\tDisplayName: \"Data Science\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjohn, err := databricks.LookupUser(ctx, \u0026databricks.LookupUserArgs{\n\t\t\tUserName: pulumi.StringRef(\"john.doe@example.com\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"dsGroupRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(john.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/group.manager\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetGroupArgs;\nimport com.pulumi.databricks.inputs.GetUserArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder()\n .displayName(\"Data Science\")\n .build());\n\n final var john = DatabricksFunctions.getUser(GetUserArgs.builder()\n .userName(\"john.doe@example.com\")\n .build());\n\n var dsGroupRuleSet = new AccessControlRuleSet(\"dsGroupRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules(AccessControlRuleSetGrantRuleArgs.builder()\n .principals(john.applyValue(getUserResult -\u003e getUserResult.aclPrincipalId()))\n .role(\"roles/group.manager\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n dsGroupRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n grantRules:\n - principals:\n - ${john.aclPrincipalId}\n role: roles/group.manager\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n ds:\n fn::invoke:\n Function: databricks:getGroup\n Arguments:\n displayName: Data Science\n john:\n fn::invoke:\n Function: databricks:getUser\n Arguments:\n userName: john.doe@example.com\n```\n\n## Account rule set usage\n\nRefer to the appropriate provider configuration as shown in the examples for service principal rule set.\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst accountId = \"00000000-0000-0000-0000-000000000000\";\nconst ds = databricks.getGroup({\n displayName: \"Data 
Science\",\n});\nconst john = databricks.getUser({\n userName: \"john.doe@example.com\",\n});\nconst accountRuleSet = new databricks.AccessControlRuleSet(\"accountRuleSet\", {grantRules: [\n {\n principals: [john.then(john =\u003e john.aclPrincipalId)],\n role: \"roles/group.manager\",\n },\n {\n principals: [data.databricks_user.ds.acl_principal_id],\n role: \"roles/servicePrincipal.manager\",\n },\n]});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\naccount_id = \"00000000-0000-0000-0000-000000000000\"\nds = databricks.get_group(display_name=\"Data Science\")\njohn = databricks.get_user(user_name=\"john.doe@example.com\")\naccount_rule_set = databricks.AccessControlRuleSet(\"accountRuleSet\", grant_rules=[\n databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[john.acl_principal_id],\n role=\"roles/group.manager\",\n ),\n databricks.AccessControlRuleSetGrantRuleArgs(\n principals=[data[\"databricks_user\"][\"ds\"][\"acl_principal_id\"]],\n role=\"roles/servicePrincipal.manager\",\n ),\n])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n var ds = Databricks.GetGroup.Invoke(new()\n {\n DisplayName = \"Data Science\",\n });\n\n var john = Databricks.GetUser.Invoke(new()\n {\n UserName = \"john.doe@example.com\",\n });\n\n var accountRuleSet = new Databricks.AccessControlRuleSet(\"accountRuleSet\", new()\n {\n GrantRules = new[]\n {\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n john.Apply(getUserResult =\u003e getUserResult.AclPrincipalId),\n },\n Role = \"roles/group.manager\",\n },\n new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs\n {\n Principals = new[]\n {\n data.Databricks_user.Ds.Acl_principal_id,\n },\n Role = \"roles/servicePrincipal.manager\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_ := \"00000000-0000-0000-0000-000000000000\"\n\t\t_, err := databricks.LookupGroup(ctx, \u0026databricks.LookupGroupArgs{\n\t\t\tDisplayName: \"Data Science\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tjohn, err := databricks.LookupUser(ctx, \u0026databricks.LookupUserArgs{\n\t\t\tUserName: pulumi.StringRef(\"john.doe@example.com\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewAccessControlRuleSet(ctx, \"accountRuleSet\", \u0026databricks.AccessControlRuleSetArgs{\n\t\t\tGrantRules: databricks.AccessControlRuleSetGrantRuleArray{\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\t*pulumi.String(john.AclPrincipalId),\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/group.manager\"),\n\t\t\t\t},\n\t\t\t\t\u0026databricks.AccessControlRuleSetGrantRuleArgs{\n\t\t\t\t\tPrincipals: pulumi.StringArray{\n\t\t\t\t\t\tdata.Databricks_user.Ds.Acl_principal_id,\n\t\t\t\t\t},\n\t\t\t\t\tRole: pulumi.String(\"roles/servicePrincipal.manager\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport 
com.pulumi.databricks.DatabricksFunctions;\nimport com.pulumi.databricks.inputs.GetGroupArgs;\nimport com.pulumi.databricks.inputs.GetUserArgs;\nimport com.pulumi.databricks.AccessControlRuleSet;\nimport com.pulumi.databricks.AccessControlRuleSetArgs;\nimport com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var accountId = \"00000000-0000-0000-0000-000000000000\";\n\n final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder()\n .displayName(\"Data Science\")\n .build());\n\n final var john = DatabricksFunctions.getUser(GetUserArgs.builder()\n .userName(\"john.doe@example.com\")\n .build());\n\n var accountRuleSet = new AccessControlRuleSet(\"accountRuleSet\", AccessControlRuleSetArgs.builder() \n .grantRules( \n AccessControlRuleSetGrantRuleArgs.builder()\n .principals(john.applyValue(getUserResult -\u003e getUserResult.aclPrincipalId()))\n .role(\"roles/group.manager\")\n .build(),\n AccessControlRuleSetGrantRuleArgs.builder()\n .principals(data.databricks_user().ds().acl_principal_id())\n .role(\"roles/servicePrincipal.manager\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n accountRuleSet:\n type: databricks:AccessControlRuleSet\n properties:\n # user john is manager for all groups in the account\n grantRules:\n - principals:\n - ${john.aclPrincipalId}\n role: roles/group.manager\n - principals:\n - ${data.databricks_user.ds.acl_principal_id}\n role: roles/servicePrincipal.manager\nvariables:\n accountId: 00000000-0000-0000-0000-000000000000\n ds:\n fn::invoke:\n Function: databricks:getGroup\n Arguments:\n displayName: Data Science\n john:\n fn::invoke:\n Function: databricks:getUser\n Arguments:\n userName: john.doe@example.com\n```\n\n## Related Resources\n\nThe following resources are often used in the same context:\n\n* databricks.Group\n* databricks.User\n* databricks.ServicePrincipal\n", "properties": { "etag": { "type": "string" @@ -9151,7 +9167,7 @@ }, "name": { "type": "string", - "description": "Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported:\n* `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default`\n" + "description": "Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported:\n* `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default`\n* `accounts/{account_id}/groups/{group_id}/ruleSets/default`\n* `accounts/{account_id}/ruleSets/default`\n" } }, "required": [ @@ -9168,7 +9184,7 @@ }, "name": { "type": "string", - "description": "Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported:\n* `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default`\n" + "description": "Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. 
The following rule set formats are supported:\n* `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default`\n* `accounts/{account_id}/groups/{group_id}/ruleSets/default`\n* `accounts/{account_id}/ruleSets/default`\n" } }, "stateInputs": { @@ -9186,7 +9202,7 @@ }, "name": { "type": "string", - "description": "Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported:\n* `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default`\n" + "description": "Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported:\n* `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default`\n* `accounts/{account_id}/groups/{group_id}/ruleSets/default`\n* `accounts/{account_id}/ruleSets/default`\n" } }, "type": "object" @@ -9199,6 +9215,10 @@ "type": "string", "description": "User-supplied free-form text.\n" }, + "connectionName": { + "type": "string", + "description": "For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource.\n" + }, "forceDestroy": { "type": "boolean", "description": "Delete catalog regardless of its contents.\n" @@ -9249,6 +9269,11 @@ "type": "string", "description": "User-supplied free-form text.\n" }, + "connectionName": { + "type": "string", + "description": "For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource.\n", + "willReplaceOnChanges": true + }, "forceDestroy": { "type": "boolean", "description": "Delete catalog regardless of its contents.\n" @@ -9298,6 +9323,11 @@ "type": "string", "description": "User-supplied free-form text.\n" }, + "connectionName": { + "type": "string", + "description": "For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource.\n", + "willReplaceOnChanges": true + }, "forceDestroy": { "type": "boolean", "description": "Delete catalog regardless of its contents.\n" @@ -9958,6 +9988,151 @@ "type": "object" } }, + "databricks:index/connection:Connection": { + "description": "Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. 
To make a dataset available for read-only querying using Lakehouse Federation, you create the following:\n\n- A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system.\n- A foreign catalog\n\nThis resource manages connections in Unity Catalog\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst mysql = new databricks.Connection(\"mysql\", {\n comment: \"this is a connection to mysql db\",\n connectionType: \"MYSQL\",\n options: {\n host: \"test.mysql.database.azure.com\",\n password: \"password\",\n port: \"3306\",\n user: \"user\",\n },\n properties: {\n purpose: \"testing\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nmysql = databricks.Connection(\"mysql\",\n comment=\"this is a connection to mysql db\",\n connection_type=\"MYSQL\",\n options={\n \"host\": \"test.mysql.database.azure.com\",\n \"password\": \"password\",\n \"port\": \"3306\",\n \"user\": \"user\",\n },\n properties={\n \"purpose\": \"testing\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var mysql = new Databricks.Connection(\"mysql\", new()\n {\n Comment = \"this is a connection to mysql db\",\n ConnectionType = \"MYSQL\",\n Options = \n {\n { \"host\", \"test.mysql.database.azure.com\" },\n { \"password\", \"password\" },\n { \"port\", \"3306\" },\n { \"user\", \"user\" },\n },\n Properties = \n {\n { \"purpose\", \"testing\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewConnection(ctx, \"mysql\", \u0026databricks.ConnectionArgs{\n\t\t\tComment: pulumi.String(\"this is a connection to mysql db\"),\n\t\t\tConnectionType: pulumi.String(\"MYSQL\"),\n\t\t\tOptions: pulumi.AnyMap{\n\t\t\t\t\"host\": pulumi.Any(\"test.mysql.database.azure.com\"),\n\t\t\t\t\"password\": pulumi.Any(\"password\"),\n\t\t\t\t\"port\": pulumi.Any(\"3306\"),\n\t\t\t\t\"user\": pulumi.Any(\"user\"),\n\t\t\t},\n\t\t\tProperties: pulumi.AnyMap{\n\t\t\t\t\"purpose\": pulumi.Any(\"testing\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Connection;\nimport com.pulumi.databricks.ConnectionArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var mysql = new Connection(\"mysql\", ConnectionArgs.builder() \n .comment(\"this is a connection to mysql db\")\n .connectionType(\"MYSQL\")\n .options(Map.ofEntries(\n Map.entry(\"host\", \"test.mysql.database.azure.com\"),\n Map.entry(\"password\", \"password\"),\n Map.entry(\"port\", \"3306\"),\n Map.entry(\"user\", \"user\")\n ))\n .properties(Map.of(\"purpose\", \"testing\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n mysql:\n type: databricks:Connection\n properties:\n 
comment: this is a connection to mysql db\n connectionType: MYSQL\n options:\n host: test.mysql.database.azure.com\n password: password\n port: '3306'\n user: user\n properties:\n purpose: testing\n```\n{{% /example %}}\n{{% /examples %}}\n\n## Import\n\nThis resource can be imported by `name` bash\n\n```sh\n $ pulumi import databricks:index/connection:Connection this \u003cconnection_name\u003e\n```\n\n ", + "properties": { + "comment": { + "type": "string", + "description": "Free-form text.\n" + }, + "connectionType": { + "type": "string", + "description": "Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources)\n" + }, + "metastoreId": { + "type": "string" + }, + "name": { + "type": "string", + "description": "Name of the Connection.\n" + }, + "options": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + }, + "description": "The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`.\n", + "secret": true + }, + "owner": { + "type": "string", + "description": "Name of the connection owner.\n" + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + }, + "description": "Free-form connection properties.\n" + }, + "readOnly": { + "type": "boolean" + } + }, + "required": [ + "connectionType", + "metastoreId", + "name", + "options", + "readOnly" + ], + "inputProperties": { + "comment": { + "type": "string", + "description": "Free-form text.\n", + "willReplaceOnChanges": true + }, + "connectionType": { + "type": "string", + "description": "Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources)\n", + "willReplaceOnChanges": true + }, + "metastoreId": { + "type": "string" + }, + "name": { + "type": "string", + "description": "Name of the Connection.\n" + }, + "options": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + }, + "description": "The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`.\n", + "secret": true + }, + "owner": { + "type": "string", + "description": "Name of the connection owner.\n", + "willReplaceOnChanges": true + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + }, + "description": "Free-form connection properties.\n", + "willReplaceOnChanges": true + }, + "readOnly": { + "type": "boolean", + "willReplaceOnChanges": true + } + }, + "requiredInputs": [ + "connectionType", + "options" + ], + "stateInputs": { + "description": "Input properties used for looking up and filtering Connection resources.\n", + "properties": { + "comment": { + "type": "string", + "description": "Free-form text.\n", + "willReplaceOnChanges": true + }, + "connectionType": { + "type": "string", + "description": "Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. 
[Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources)\n", + "willReplaceOnChanges": true + }, + "metastoreId": { + "type": "string" + }, + "name": { + "type": "string", + "description": "Name of the Connection.\n" + }, + "options": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + }, + "description": "The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`.\n", + "secret": true + }, + "owner": { + "type": "string", + "description": "Name of the connection owner.\n", + "willReplaceOnChanges": true + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + }, + "description": "Free-form connection properties.\n", + "willReplaceOnChanges": true + }, + "readOnly": { + "type": "boolean", + "willReplaceOnChanges": true + } + }, + "type": "object" + } + }, "databricks:index/dbfsFile:DbfsFile": { "description": "\n\n\n## Import\n\nThe resource dbfs file can be imported using the path of the filebash\n\n```sh\n $ pulumi import databricks:index/dbfsFile:DbfsFile this \u003cpath\u003e\n```\n\n ", "properties": { @@ -10206,18 +10381,30 @@ "databricks:index/externalLocation:ExternalLocation": { "description": "To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage:\n\n- databricks.StorageCredential represent authentication methods to access cloud storage (e.g. an IAM role for Amazon S3 or a service principal for Azure Storage). Storage credentials are access-controlled to determine which users can use the credential.\n- `databricks.ExternalLocation` are objects that combine a cloud storage path with a Storage Credential that can be used to access the location.\n\n\n## Import\n\nThis resource can be imported by namebash\n\n```sh\n $ pulumi import databricks:index/externalLocation:ExternalLocation this \u003cname\u003e\n```\n\n ", "properties": { + "accessPoint": { + "type": "string", + "description": "The ARN of the s3 access point to use with the external location (AWS).\n" + }, "comment": { "type": "string", "description": "User-supplied free-form text.\n" }, "credentialName": { "type": "string", - "description": "Name of the databricks.StorageCredential to use with this External Location.\n" + "description": "Name of the databricks.StorageCredential to use with this external location.\n" + }, + "encryptionDetails": { + "$ref": "#/types/databricks:index/ExternalLocationEncryptionDetails:ExternalLocationEncryptionDetails", + "description": "The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS).\n" }, "forceDestroy": { "type": "boolean", "description": "Destroy external location regardless of its dependents.\n" }, + "forceUpdate": { + "type": "boolean", + "description": "Update external location regardless of its dependents.\n" + }, "metastoreId": { "type": "string" }, @@ -10227,7 +10414,7 @@ }, "owner": { "type": "string", - "description": "Username/groupname/sp application_id of the external Location owner.\n" + "description": "Username/groupname/sp application_id of the external location owner.\n" }, "readOnly": { "type": "boolean", @@ -10250,18 +10437,30 @@ "url" ], "inputProperties": { + "accessPoint": { + "type": "string", + "description": "The ARN of the s3 access point to use with the external location (AWS).\n" + }, "comment": { "type": "string", "description": "User-supplied 
free-form text.\n" }, "credentialName": { "type": "string", - "description": "Name of the databricks.StorageCredential to use with this External Location.\n" + "description": "Name of the databricks.StorageCredential to use with this external location.\n" + }, + "encryptionDetails": { + "$ref": "#/types/databricks:index/ExternalLocationEncryptionDetails:ExternalLocationEncryptionDetails", + "description": "The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS).\n" }, "forceDestroy": { "type": "boolean", "description": "Destroy external location regardless of its dependents.\n" }, + "forceUpdate": { + "type": "boolean", + "description": "Update external location regardless of its dependents.\n" + }, "metastoreId": { "type": "string" }, @@ -10272,7 +10471,7 @@ }, "owner": { "type": "string", - "description": "Username/groupname/sp application_id of the external Location owner.\n" + "description": "Username/groupname/sp application_id of the external location owner.\n" }, "readOnly": { "type": "boolean", @@ -10294,18 +10493,30 @@ "stateInputs": { "description": "Input properties used for looking up and filtering ExternalLocation resources.\n", "properties": { + "accessPoint": { + "type": "string", + "description": "The ARN of the s3 access point to use with the external location (AWS).\n" + }, "comment": { "type": "string", "description": "User-supplied free-form text.\n" }, "credentialName": { "type": "string", - "description": "Name of the databricks.StorageCredential to use with this External Location.\n" + "description": "Name of the databricks.StorageCredential to use with this external location.\n" + }, + "encryptionDetails": { + "$ref": "#/types/databricks:index/ExternalLocationEncryptionDetails:ExternalLocationEncryptionDetails", + "description": "The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS).\n" }, "forceDestroy": { "type": "boolean", "description": "Destroy external location regardless of its dependents.\n" }, + "forceUpdate": { + "type": "boolean", + "description": "Update external location regardless of its dependents.\n" + }, "metastoreId": { "type": "string" }, @@ -10316,7 +10527,7 @@ }, "owner": { "type": "string", - "description": "Username/groupname/sp application_id of the external Location owner.\n" + "description": "Username/groupname/sp application_id of the external location owner.\n" }, "readOnly": { "type": "boolean", @@ -10492,6 +10703,9 @@ "externalLocation": { "type": "string" }, + "foreignConnection": { + "type": "string" + }, "function": { "type": "string" }, @@ -10543,6 +10757,10 @@ "type": "string", "willReplaceOnChanges": true }, + "foreignConnection": { + "type": "string", + "willReplaceOnChanges": true + }, "function": { "type": "string", "willReplaceOnChanges": true @@ -10605,6 +10823,10 @@ "type": "string", "willReplaceOnChanges": true }, + "foreignConnection": { + "type": "string", + "willReplaceOnChanges": true + }, "function": { "type": "string", "willReplaceOnChanges": true @@ -11844,7 +12066,7 @@ } }, "databricks:index/metastore:Metastore": { - "description": "\u003e **Notes**\n Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future.\n\nA metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. 
Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore.\n\nUnity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore).\n\n\n## Import\n\nThis resource can be imported by IDbash\n\n```sh\n $ pulumi import databricks:index/metastore:Metastore this \u003cid\u003e\n```\n\n ", + "description": "A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore.\n\nUnity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore).\n\n\n## Import\n\nThis resource can be imported by IDbash\n\n```sh\n $ pulumi import databricks:index/metastore:Metastore this \u003cid\u003e\n```\n\n ", "properties": { "cloud": { "type": "string" @@ -11877,6 +12099,9 @@ "globalMetastoreId": { "type": "string" }, + "metastoreId": { + "type": "string" + }, "name": { "type": "string", "description": "Name of metastore.\n" @@ -11886,12 +12111,16 @@ "description": "Username/groupname/sp application_id of the metastore owner.\n" }, "region": { - "type": "string" + "type": "string", + "description": "The region of the metastore\n" }, "storageRoot": { "type": "string", "description": "Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource.\n" }, + "storageRootCredentialId": { + "type": "string" + }, "updatedAt": { "type": "integer" }, @@ -11904,6 +12133,7 @@ "createdAt", "createdBy", "globalMetastoreId", + "metastoreId", "name", "owner", "region", @@ -11943,6 +12173,9 @@ "globalMetastoreId": { "type": "string" }, + "metastoreId": { + "type": "string" + }, "name": { "type": "string", "description": "Name of metastore.\n" @@ -11952,13 +12185,17 @@ "description": "Username/groupname/sp application_id of the metastore owner.\n" }, "region": { - "type": "string" + "type": "string", + "description": "The region of the metastore\n" }, "storageRoot": { "type": "string", "description": "Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource.\n", "willReplaceOnChanges": true }, + "storageRootCredentialId": { + "type": "string" + }, "updatedAt": { "type": "integer" }, @@ -12003,6 +12240,9 @@ "globalMetastoreId": { "type": "string" }, + "metastoreId": { + "type": "string" + }, "name": { "type": "string", "description": "Name of metastore.\n" @@ -12012,13 +12252,17 @@ "description": "Username/groupname/sp application_id of the metastore owner.\n" }, "region": { - "type": "string" + "type": "string", + "description": "The region of the metastore\n" }, "storageRoot": { "type": "string", "description": "Path on cloud storage account, where managed `databricks.Table` are stored. 
Change forces creation of a new resource.\n", "willReplaceOnChanges": true }, + "storageRootCredentialId": { + "type": "string" + }, "updatedAt": { "type": "integer" }, @@ -12030,7 +12274,7 @@ } }, "databricks:index/metastoreAssignment:MetastoreAssignment": { - "description": "A single databricks.Metastore can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. You can only create a single metastore for each region in which your organization operates.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst thisMetastore = new databricks.Metastore(\"thisMetastore\", {\n storageRoot: `s3://${aws_s3_bucket.metastore.id}/metastore`,\n owner: \"uc admins\",\n forceDestroy: true,\n});\nconst thisMetastoreAssignment = new databricks.MetastoreAssignment(\"thisMetastoreAssignment\", {\n metastoreId: thisMetastore.id,\n workspaceId: local.workspace_id,\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis_metastore = databricks.Metastore(\"thisMetastore\",\n storage_root=f\"s3://{aws_s3_bucket['metastore']['id']}/metastore\",\n owner=\"uc admins\",\n force_destroy=True)\nthis_metastore_assignment = databricks.MetastoreAssignment(\"thisMetastoreAssignment\",\n metastore_id=this_metastore.id,\n workspace_id=local[\"workspace_id\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var thisMetastore = new Databricks.Metastore(\"thisMetastore\", new()\n {\n StorageRoot = $\"s3://{aws_s3_bucket.Metastore.Id}/metastore\",\n Owner = \"uc admins\",\n ForceDestroy = true,\n });\n\n var thisMetastoreAssignment = new Databricks.MetastoreAssignment(\"thisMetastoreAssignment\", new()\n {\n MetastoreId = thisMetastore.Id,\n WorkspaceId = local.Workspace_id,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tthisMetastore, err := databricks.NewMetastore(ctx, \"thisMetastore\", \u0026databricks.MetastoreArgs{\n\t\t\tStorageRoot: pulumi.String(fmt.Sprintf(\"s3://%v/metastore\", aws_s3_bucket.Metastore.Id)),\n\t\t\tOwner: pulumi.String(\"uc admins\"),\n\t\t\tForceDestroy: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewMetastoreAssignment(ctx, \"thisMetastoreAssignment\", \u0026databricks.MetastoreAssignmentArgs{\n\t\t\tMetastoreId: thisMetastore.ID(),\n\t\t\tWorkspaceId: pulumi.Any(local.Workspace_id),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Metastore;\nimport com.pulumi.databricks.MetastoreArgs;\nimport com.pulumi.databricks.MetastoreAssignment;\nimport com.pulumi.databricks.MetastoreAssignmentArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var thisMetastore = new 
Metastore(\"thisMetastore\", MetastoreArgs.builder() \n .storageRoot(String.format(\"s3://%s/metastore\", aws_s3_bucket.metastore().id()))\n .owner(\"uc admins\")\n .forceDestroy(true)\n .build());\n\n var thisMetastoreAssignment = new MetastoreAssignment(\"thisMetastoreAssignment\", MetastoreAssignmentArgs.builder() \n .metastoreId(thisMetastore.id())\n .workspaceId(local.workspace_id())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n thisMetastore:\n type: databricks:Metastore\n properties:\n storageRoot: s3://${aws_s3_bucket.metastore.id}/metastore\n owner: uc admins\n forceDestroy: true\n thisMetastoreAssignment:\n type: databricks:MetastoreAssignment\n properties:\n metastoreId: ${thisMetastore.id}\n workspaceId: ${local.workspace_id}\n```\n{{% /example %}}\n{{% /examples %}}", + "description": "A single databricks.Metastore can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. You can only create a single metastore for each region in which your organization operates.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst thisMetastore = new databricks.Metastore(\"thisMetastore\", {\n storageRoot: `s3://${aws_s3_bucket.metastore.id}/metastore`,\n owner: \"uc admins\",\n region: \"us-east-1\",\n forceDestroy: true,\n});\nconst thisMetastoreAssignment = new databricks.MetastoreAssignment(\"thisMetastoreAssignment\", {\n metastoreId: thisMetastore.id,\n workspaceId: local.workspace_id,\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nthis_metastore = databricks.Metastore(\"thisMetastore\",\n storage_root=f\"s3://{aws_s3_bucket['metastore']['id']}/metastore\",\n owner=\"uc admins\",\n region=\"us-east-1\",\n force_destroy=True)\nthis_metastore_assignment = databricks.MetastoreAssignment(\"thisMetastoreAssignment\",\n metastore_id=this_metastore.id,\n workspace_id=local[\"workspace_id\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var thisMetastore = new Databricks.Metastore(\"thisMetastore\", new()\n {\n StorageRoot = $\"s3://{aws_s3_bucket.Metastore.Id}/metastore\",\n Owner = \"uc admins\",\n Region = \"us-east-1\",\n ForceDestroy = true,\n });\n\n var thisMetastoreAssignment = new Databricks.MetastoreAssignment(\"thisMetastoreAssignment\", new()\n {\n MetastoreId = thisMetastore.Id,\n WorkspaceId = local.Workspace_id,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tthisMetastore, err := databricks.NewMetastore(ctx, \"thisMetastore\", \u0026databricks.MetastoreArgs{\n\t\t\tStorageRoot: pulumi.String(fmt.Sprintf(\"s3://%v/metastore\", aws_s3_bucket.Metastore.Id)),\n\t\t\tOwner: pulumi.String(\"uc admins\"),\n\t\t\tRegion: pulumi.String(\"us-east-1\"),\n\t\t\tForceDestroy: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewMetastoreAssignment(ctx, \"thisMetastoreAssignment\", \u0026databricks.MetastoreAssignmentArgs{\n\t\t\tMetastoreId: thisMetastore.ID(),\n\t\t\tWorkspaceId: pulumi.Any(local.Workspace_id),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn 
nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Metastore;\nimport com.pulumi.databricks.MetastoreArgs;\nimport com.pulumi.databricks.MetastoreAssignment;\nimport com.pulumi.databricks.MetastoreAssignmentArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var thisMetastore = new Metastore(\"thisMetastore\", MetastoreArgs.builder() \n .storageRoot(String.format(\"s3://%s/metastore\", aws_s3_bucket.metastore().id()))\n .owner(\"uc admins\")\n .region(\"us-east-1\")\n .forceDestroy(true)\n .build());\n\n var thisMetastoreAssignment = new MetastoreAssignment(\"thisMetastoreAssignment\", MetastoreAssignmentArgs.builder() \n .metastoreId(thisMetastore.id())\n .workspaceId(local.workspace_id())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n thisMetastore:\n type: databricks:Metastore\n properties:\n storageRoot: s3://${aws_s3_bucket.metastore.id}/metastore\n owner: uc admins\n region: us-east-1\n forceDestroy: true\n thisMetastoreAssignment:\n type: databricks:MetastoreAssignment\n properties:\n metastoreId: ${thisMetastore.id}\n workspaceId: ${local.workspace_id}\n```\n{{% /example %}}\n{{% /examples %}}", "properties": { "defaultCatalogName": { "type": "string", @@ -12060,8 +12304,7 @@ }, "workspaceId": { "type": "integer", - "description": "id of the workspace for the assignment\n", - "willReplaceOnChanges": true + "description": "id of the workspace for the assignment\n" } }, "requiredInputs": [ @@ -12081,8 +12324,7 @@ }, "workspaceId": { "type": "integer", - "description": "id of the workspace for the assignment\n", - "willReplaceOnChanges": true + "description": "id of the workspace for the assignment\n" } }, "type": "object" @@ -12398,9 +12640,6 @@ "type": "string", "description": "Name of MLflow model. Change of name triggers new resource.\n" }, - "registeredModelId": { - "type": "string" - }, "tags": { "type": "array", "items": { @@ -12413,11 +12652,7 @@ } }, "required": [ - "creationTimestamp", - "lastUpdatedTimestamp", - "name", - "registeredModelId", - "userId" + "name" ], "inputProperties": { "creationTimestamp": { @@ -12435,9 +12670,6 @@ "description": "Name of MLflow model. Change of name triggers new resource.\n", "willReplaceOnChanges": true }, - "registeredModelId": { - "type": "string" - }, "tags": { "type": "array", "items": { @@ -12467,9 +12699,6 @@ "description": "Name of MLflow model. Change of name triggers new resource.\n", "willReplaceOnChanges": true }, - "registeredModelId": { - "type": "string" - }, "tags": { "type": "array", "items": { @@ -12536,8 +12765,7 @@ }, "modelName": { "type": "string", - "description": "Name of MLflow model for which webhook will be created. If the model name is not specified, a registry-wide webhook is created that listens for the specified events across all versions of all registered models.\n", - "willReplaceOnChanges": true + "description": "Name of MLflow model for which webhook will be created. 
If the model name is not specified, a registry-wide webhook is created that listens for the specified events across all versions of all registered models.\n" }, "status": { "type": "string", @@ -12569,8 +12797,7 @@ }, "modelName": { "type": "string", - "description": "Name of MLflow model for which webhook will be created. If the model name is not specified, a registry-wide webhook is created that listens for the specified events across all versions of all registered models.\n", - "willReplaceOnChanges": true + "description": "Name of MLflow model for which webhook will be created. If the model name is not specified, a registry-wide webhook is created that listens for the specified events across all versions of all registered models.\n" }, "status": { "type": "string", @@ -12885,7 +13112,7 @@ } }, "databricks:index/mwsCustomerManagedKeys:MwsCustomerManagedKeys": { - "description": "{{% examples %}}\n## Example Usage\n\n\u003e **Note** If you've used the resource before, please add `use_cases = [\"MANAGED_SERVICES\"]` to keep the previous behaviour.\n{{% example %}}\n### Customer-managed key for managed services\n\nYou must configure this during workspace creation\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\nconst current = aws.getCallerIdentity({});\nconst databricksManagedServicesCmk = current.then(current =\u003e aws.iam.getPolicyDocument({\n version: \"2012-10-17\",\n statements: [\n {\n sid: \"Enable IAM User Permissions\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [current.accountId],\n }],\n actions: [\"kms:*\"],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS key for control plane managed services\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n actions: [\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n ],\n resources: [\"*\"],\n },\n ],\n}));\nconst managedServicesCustomerManagedKey = new aws.kms.Key(\"managedServicesCustomerManagedKey\", {policy: databricksManagedServicesCmk.then(databricksManagedServicesCmk =\u003e databricksManagedServicesCmk.json)});\nconst managedServicesCustomerManagedKeyAlias = new aws.kms.Alias(\"managedServicesCustomerManagedKeyAlias\", {targetKeyId: managedServicesCustomerManagedKey.keyId});\nconst managedServices = new databricks.MwsCustomerManagedKeys(\"managedServices\", {\n accountId: databricksAccountId,\n awsKeyInfo: {\n keyArn: managedServicesCustomerManagedKey.arn,\n keyAlias: managedServicesCustomerManagedKeyAlias.name,\n },\n useCases: [\"MANAGED_SERVICES\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\ncurrent = aws.get_caller_identity()\ndatabricks_managed_services_cmk = aws.iam.get_policy_document(version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Enable IAM User Permissions\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[current.account_id],\n )],\n actions=[\"kms:*\"],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for control plane managed services\",\n effect=\"Allow\",\n 
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n ],\n resources=[\"*\"],\n ),\n ])\nmanaged_services_customer_managed_key = aws.kms.Key(\"managedServicesCustomerManagedKey\", policy=databricks_managed_services_cmk.json)\nmanaged_services_customer_managed_key_alias = aws.kms.Alias(\"managedServicesCustomerManagedKeyAlias\", target_key_id=managed_services_customer_managed_key.key_id)\nmanaged_services = databricks.MwsCustomerManagedKeys(\"managedServices\",\n account_id=databricks_account_id,\n aws_key_info=databricks.MwsCustomerManagedKeysAwsKeyInfoArgs(\n key_arn=managed_services_customer_managed_key.arn,\n key_alias=managed_services_customer_managed_key_alias.name,\n ),\n use_cases=[\"MANAGED_SERVICES\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n var current = Aws.GetCallerIdentity.Invoke();\n\n var databricksManagedServicesCmk = Aws.Iam.GetPolicyDocument.Invoke(new()\n {\n Version = \"2012-10-17\",\n Statements = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Enable IAM User Permissions\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n current.Apply(getCallerIdentityResult =\u003e getCallerIdentityResult.AccountId),\n },\n },\n },\n Actions = new[]\n {\n \"kms:*\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for control plane managed services\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n },\n });\n\n var managedServicesCustomerManagedKey = new Aws.Kms.Key(\"managedServicesCustomerManagedKey\", new()\n {\n Policy = databricksManagedServicesCmk.Apply(getPolicyDocumentResult =\u003e getPolicyDocumentResult.Json),\n });\n\n var managedServicesCustomerManagedKeyAlias = new Aws.Kms.Alias(\"managedServicesCustomerManagedKeyAlias\", new()\n {\n TargetKeyId = managedServicesCustomerManagedKey.KeyId,\n });\n\n var managedServices = new Databricks.MwsCustomerManagedKeys(\"managedServices\", new()\n {\n AccountId = databricksAccountId,\n AwsKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysAwsKeyInfoArgs\n {\n KeyArn = managedServicesCustomerManagedKey.Arn,\n KeyAlias = managedServicesCustomerManagedKeyAlias.Name,\n },\n UseCases = new[]\n {\n \"MANAGED_SERVICES\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws/sdk/v5/go/aws\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kms\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\nfunc main() {\npulumi.Run(func(ctx *pulumi.Context) error {\ncfg := config.New(ctx, \"\")\ndatabricksAccountId := 
cfg.RequireObject(\"databricksAccountId\")\ncurrent, err := aws.GetCallerIdentity(ctx, nil, nil);\nif err != nil {\nreturn err\n}\ndatabricksManagedServicesCmk, err := iam.GetPolicyDocument(ctx, \u0026iam.GetPolicyDocumentArgs{\nVersion: pulumi.StringRef(\"2012-10-17\"),\nStatements: []iam.GetPolicyDocumentStatement{\n{\nSid: pulumi.StringRef(\"Enable IAM User Permissions\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ncurrent.AccountId,\n},\n},\n},\nActions: []string{\n\"kms:*\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for control plane managed services\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:Encrypt\",\n\"kms:Decrypt\",\n},\nResources: []string{\n\"*\",\n},\n},\n},\n}, nil);\nif err != nil {\nreturn err\n}\nmanagedServicesCustomerManagedKey, err := kms.NewKey(ctx, \"managedServicesCustomerManagedKey\", \u0026kms.KeyArgs{\nPolicy: *pulumi.String(databricksManagedServicesCmk.Json),\n})\nif err != nil {\nreturn err\n}\nmanagedServicesCustomerManagedKeyAlias, err := kms.NewAlias(ctx, \"managedServicesCustomerManagedKeyAlias\", \u0026kms.AliasArgs{\nTargetKeyId: managedServicesCustomerManagedKey.KeyId,\n})\nif err != nil {\nreturn err\n}\n_, err = databricks.NewMwsCustomerManagedKeys(ctx, \"managedServices\", \u0026databricks.MwsCustomerManagedKeysArgs{\nAccountId: pulumi.Any(databricksAccountId),\nAwsKeyInfo: \u0026databricks.MwsCustomerManagedKeysAwsKeyInfoArgs{\nKeyArn: managedServicesCustomerManagedKey.Arn,\nKeyAlias: managedServicesCustomerManagedKeyAlias.Name,\n},\nUseCases: pulumi.StringArray{\npulumi.String(\"MANAGED_SERVICES\"),\n},\n})\nif err != nil {\nreturn err\n}\nreturn nil\n})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.aws.AwsFunctions;\nimport com.pulumi.aws.iam.IamFunctions;\nimport com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;\nimport com.pulumi.aws.kms.Key;\nimport com.pulumi.aws.kms.KeyArgs;\nimport com.pulumi.aws.kms.Alias;\nimport com.pulumi.aws.kms.AliasArgs;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysAwsKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var current = AwsFunctions.getCallerIdentity();\n\n final var databricksManagedServicesCmk = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()\n .version(\"2012-10-17\")\n .statements( \n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Enable IAM User Permissions\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(current.applyValue(getCallerIdentityResult -\u003e getCallerIdentityResult.accountId()))\n .build())\n .actions(\"kms:*\")\n .resources(\"*\")\n .build(),\n 
GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for control plane managed services\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:Encrypt\",\n \"kms:Decrypt\")\n .resources(\"*\")\n .build())\n .build());\n\n var managedServicesCustomerManagedKey = new Key(\"managedServicesCustomerManagedKey\", KeyArgs.builder() \n .policy(databricksManagedServicesCmk.applyValue(getPolicyDocumentResult -\u003e getPolicyDocumentResult.json()))\n .build());\n\n var managedServicesCustomerManagedKeyAlias = new Alias(\"managedServicesCustomerManagedKeyAlias\", AliasArgs.builder() \n .targetKeyId(managedServicesCustomerManagedKey.keyId())\n .build());\n\n var managedServices = new MwsCustomerManagedKeys(\"managedServices\", MwsCustomerManagedKeysArgs.builder() \n .accountId(databricksAccountId)\n .awsKeyInfo(MwsCustomerManagedKeysAwsKeyInfoArgs.builder()\n .keyArn(managedServicesCustomerManagedKey.arn())\n .keyAlias(managedServicesCustomerManagedKeyAlias.name())\n .build())\n .useCases(\"MANAGED_SERVICES\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\nresources:\n managedServicesCustomerManagedKey:\n type: aws:kms:Key\n properties:\n policy: ${databricksManagedServicesCmk.json}\n managedServicesCustomerManagedKeyAlias:\n type: aws:kms:Alias\n properties:\n targetKeyId: ${managedServicesCustomerManagedKey.keyId}\n managedServices:\n type: databricks:MwsCustomerManagedKeys\n properties:\n accountId: ${databricksAccountId}\n awsKeyInfo:\n keyArn: ${managedServicesCustomerManagedKey.arn}\n keyAlias: ${managedServicesCustomerManagedKeyAlias.name}\n useCases:\n - MANAGED_SERVICES\nvariables:\n current:\n fn::invoke:\n Function: aws:getCallerIdentity\n Arguments: {}\n databricksManagedServicesCmk:\n fn::invoke:\n Function: aws:iam:getPolicyDocument\n Arguments:\n version: 2012-10-17\n statements:\n - sid: Enable IAM User Permissions\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${current.accountId}\n actions:\n - kms:*\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for control plane managed services\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:Encrypt\n - kms:Decrypt\n resources:\n - '*'\n```\n{{% /example %}}\n{{% example %}}\n### Customer-managed key for workspace storage\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\nconst databricksCrossAccountRole = config.requireObject(\"databricksCrossAccountRole\");\nconst databricksStorageCmk = aws.iam.getPolicyDocument({\n version: \"2012-10-17\",\n statements: [\n {\n sid: \"Enable IAM User Permissions\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [data.aws_caller_identity.current.account_id],\n }],\n actions: [\"kms:*\"],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS key for DBFS\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n actions: [\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n ],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS 
key for DBFS (Grants)\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n actions: [\n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\",\n ],\n resources: [\"*\"],\n conditions: [{\n test: \"Bool\",\n variable: \"kms:GrantIsForAWSResource\",\n values: [\"true\"],\n }],\n },\n {\n sid: \"Allow Databricks to use KMS key for EBS\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [databricksCrossAccountRole],\n }],\n actions: [\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n ],\n resources: [\"*\"],\n conditions: [{\n test: \"ForAnyValue:StringLike\",\n variable: \"kms:ViaService\",\n values: [\"ec2.*.amazonaws.com\"],\n }],\n },\n ],\n});\nconst storageCustomerManagedKey = new aws.kms.Key(\"storageCustomerManagedKey\", {policy: databricksStorageCmk.then(databricksStorageCmk =\u003e databricksStorageCmk.json)});\nconst storageCustomerManagedKeyAlias = new aws.kms.Alias(\"storageCustomerManagedKeyAlias\", {targetKeyId: storageCustomerManagedKey.keyId});\nconst storage = new databricks.MwsCustomerManagedKeys(\"storage\", {\n accountId: databricksAccountId,\n awsKeyInfo: {\n keyArn: storageCustomerManagedKey.arn,\n keyAlias: storageCustomerManagedKeyAlias.name,\n },\n useCases: [\"STORAGE\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\ndatabricks_cross_account_role = config.require_object(\"databricksCrossAccountRole\")\ndatabricks_storage_cmk = aws.iam.get_policy_document(version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Enable IAM User Permissions\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[data[\"aws_caller_identity\"][\"current\"][\"account_id\"]],\n )],\n actions=[\"kms:*\"],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for DBFS\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n ],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for DBFS (Grants)\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\",\n ],\n resources=[\"*\"],\n conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(\n test=\"Bool\",\n variable=\"kms:GrantIsForAWSResource\",\n values=[\"true\"],\n )],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for EBS\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[databricks_cross_account_role],\n )],\n actions=[\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n ],\n resources=[\"*\"],\n conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(\n test=\"ForAnyValue:StringLike\",\n variable=\"kms:ViaService\",\n values=[\"ec2.*.amazonaws.com\"],\n )],\n ),\n 
])\nstorage_customer_managed_key = aws.kms.Key(\"storageCustomerManagedKey\", policy=databricks_storage_cmk.json)\nstorage_customer_managed_key_alias = aws.kms.Alias(\"storageCustomerManagedKeyAlias\", target_key_id=storage_customer_managed_key.key_id)\nstorage = databricks.MwsCustomerManagedKeys(\"storage\",\n account_id=databricks_account_id,\n aws_key_info=databricks.MwsCustomerManagedKeysAwsKeyInfoArgs(\n key_arn=storage_customer_managed_key.arn,\n key_alias=storage_customer_managed_key_alias.name,\n ),\n use_cases=[\"STORAGE\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n var databricksCrossAccountRole = config.RequireObject\u003cdynamic\u003e(\"databricksCrossAccountRole\");\n var databricksStorageCmk = Aws.Iam.GetPolicyDocument.Invoke(new()\n {\n Version = \"2012-10-17\",\n Statements = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Enable IAM User Permissions\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n data.Aws_caller_identity.Current.Account_id,\n },\n },\n },\n Actions = new[]\n {\n \"kms:*\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for DBFS\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for DBFS (Grants)\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\",\n },\n Resources = new[]\n {\n \"*\",\n },\n Conditions = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementConditionInputArgs\n {\n Test = \"Bool\",\n Variable = \"kms:GrantIsForAWSResource\",\n Values = new[]\n {\n \"true\",\n },\n },\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for EBS\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n databricksCrossAccountRole,\n },\n },\n },\n Actions = new[]\n {\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n },\n Resources = new[]\n {\n \"*\",\n },\n Conditions = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementConditionInputArgs\n {\n Test = \"ForAnyValue:StringLike\",\n Variable = \"kms:ViaService\",\n Values = new[]\n {\n \"ec2.*.amazonaws.com\",\n },\n },\n },\n },\n },\n });\n\n var storageCustomerManagedKey = new Aws.Kms.Key(\"storageCustomerManagedKey\", new()\n {\n Policy = 
databricksStorageCmk.Apply(getPolicyDocumentResult =\u003e getPolicyDocumentResult.Json),\n });\n\n var storageCustomerManagedKeyAlias = new Aws.Kms.Alias(\"storageCustomerManagedKeyAlias\", new()\n {\n TargetKeyId = storageCustomerManagedKey.KeyId,\n });\n\n var storage = new Databricks.MwsCustomerManagedKeys(\"storage\", new()\n {\n AccountId = databricksAccountId,\n AwsKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysAwsKeyInfoArgs\n {\n KeyArn = storageCustomerManagedKey.Arn,\n KeyAlias = storageCustomerManagedKeyAlias.Name,\n },\n UseCases = new[]\n {\n \"STORAGE\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kms\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\nfunc main() {\npulumi.Run(func(ctx *pulumi.Context) error {\ncfg := config.New(ctx, \"\")\ndatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\ndatabricksCrossAccountRole := cfg.RequireObject(\"databricksCrossAccountRole\")\ndatabricksStorageCmk, err := iam.GetPolicyDocument(ctx, \u0026iam.GetPolicyDocumentArgs{\nVersion: pulumi.StringRef(\"2012-10-17\"),\nStatements: []iam.GetPolicyDocumentStatement{\n{\nSid: pulumi.StringRef(\"Enable IAM User Permissions\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ndata.Aws_caller_identity.Current.Account_id,\n},\n},\n},\nActions: []string{\n\"kms:*\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for DBFS\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:Encrypt\",\n\"kms:Decrypt\",\n\"kms:ReEncrypt*\",\n\"kms:GenerateDataKey*\",\n\"kms:DescribeKey\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for DBFS (Grants)\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:CreateGrant\",\n\"kms:ListGrants\",\n\"kms:RevokeGrant\",\n},\nResources: []string{\n\"*\",\n},\nConditions: []iam.GetPolicyDocumentStatementCondition{\n{\nTest: \"Bool\",\nVariable: \"kms:GrantIsForAWSResource\",\nValues: []string{\n\"true\",\n},\n},\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for EBS\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ndatabricksCrossAccountRole,\n},\n},\n},\nActions: []string{\n\"kms:Decrypt\",\n\"kms:GenerateDataKey*\",\n\"kms:CreateGrant\",\n\"kms:DescribeKey\",\n},\nResources: []string{\n\"*\",\n},\nConditions: []iam.GetPolicyDocumentStatementCondition{\n{\nTest: \"ForAnyValue:StringLike\",\nVariable: \"kms:ViaService\",\nValues: []string{\n\"ec2.*.amazonaws.com\",\n},\n},\n},\n},\n},\n}, nil);\nif err != nil {\nreturn err\n}\nstorageCustomerManagedKey, err := kms.NewKey(ctx, \"storageCustomerManagedKey\", \u0026kms.KeyArgs{\nPolicy: *pulumi.String(databricksStorageCmk.Json),\n})\nif err != nil {\nreturn err\n}\nstorageCustomerManagedKeyAlias, err := kms.NewAlias(ctx, 
\"storageCustomerManagedKeyAlias\", \u0026kms.AliasArgs{\nTargetKeyId: storageCustomerManagedKey.KeyId,\n})\nif err != nil {\nreturn err\n}\n_, err = databricks.NewMwsCustomerManagedKeys(ctx, \"storage\", \u0026databricks.MwsCustomerManagedKeysArgs{\nAccountId: pulumi.Any(databricksAccountId),\nAwsKeyInfo: \u0026databricks.MwsCustomerManagedKeysAwsKeyInfoArgs{\nKeyArn: storageCustomerManagedKey.Arn,\nKeyAlias: storageCustomerManagedKeyAlias.Name,\n},\nUseCases: pulumi.StringArray{\npulumi.String(\"STORAGE\"),\n},\n})\nif err != nil {\nreturn err\n}\nreturn nil\n})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.aws.iam.IamFunctions;\nimport com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;\nimport com.pulumi.aws.kms.Key;\nimport com.pulumi.aws.kms.KeyArgs;\nimport com.pulumi.aws.kms.Alias;\nimport com.pulumi.aws.kms.AliasArgs;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysAwsKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var databricksCrossAccountRole = config.get(\"databricksCrossAccountRole\");\n final var databricksStorageCmk = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()\n .version(\"2012-10-17\")\n .statements( \n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Enable IAM User Permissions\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(data.aws_caller_identity().current().account_id())\n .build())\n .actions(\"kms:*\")\n .resources(\"*\")\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for DBFS\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\")\n .resources(\"*\")\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for DBFS (Grants)\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\")\n .resources(\"*\")\n .conditions(GetPolicyDocumentStatementConditionArgs.builder()\n .test(\"Bool\")\n .variable(\"kms:GrantIsForAWSResource\")\n .values(\"true\")\n .build())\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for EBS\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(databricksCrossAccountRole)\n .build())\n .actions( \n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\")\n .resources(\"*\")\n .conditions(GetPolicyDocumentStatementConditionArgs.builder()\n .test(\"ForAnyValue:StringLike\")\n .variable(\"kms:ViaService\")\n 
.values(\"ec2.*.amazonaws.com\")\n .build())\n .build())\n .build());\n\n var storageCustomerManagedKey = new Key(\"storageCustomerManagedKey\", KeyArgs.builder() \n .policy(databricksStorageCmk.applyValue(getPolicyDocumentResult -\u003e getPolicyDocumentResult.json()))\n .build());\n\n var storageCustomerManagedKeyAlias = new Alias(\"storageCustomerManagedKeyAlias\", AliasArgs.builder() \n .targetKeyId(storageCustomerManagedKey.keyId())\n .build());\n\n var storage = new MwsCustomerManagedKeys(\"storage\", MwsCustomerManagedKeysArgs.builder() \n .accountId(databricksAccountId)\n .awsKeyInfo(MwsCustomerManagedKeysAwsKeyInfoArgs.builder()\n .keyArn(storageCustomerManagedKey.arn())\n .keyAlias(storageCustomerManagedKeyAlias.name())\n .build())\n .useCases(\"STORAGE\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\n databricksCrossAccountRole:\n type: dynamic\nresources:\n storageCustomerManagedKey:\n type: aws:kms:Key\n properties:\n policy: ${databricksStorageCmk.json}\n storageCustomerManagedKeyAlias:\n type: aws:kms:Alias\n properties:\n targetKeyId: ${storageCustomerManagedKey.keyId}\n storage:\n type: databricks:MwsCustomerManagedKeys\n properties:\n accountId: ${databricksAccountId}\n awsKeyInfo:\n keyArn: ${storageCustomerManagedKey.arn}\n keyAlias: ${storageCustomerManagedKeyAlias.name}\n useCases:\n - STORAGE\nvariables:\n databricksStorageCmk:\n fn::invoke:\n Function: aws:iam:getPolicyDocument\n Arguments:\n version: 2012-10-17\n statements:\n - sid: Enable IAM User Permissions\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${data.aws_caller_identity.current.account_id}\n actions:\n - kms:*\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for DBFS\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:Encrypt\n - kms:Decrypt\n - kms:ReEncrypt*\n - kms:GenerateDataKey*\n - kms:DescribeKey\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for DBFS (Grants)\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:CreateGrant\n - kms:ListGrants\n - kms:RevokeGrant\n resources:\n - '*'\n conditions:\n - test: Bool\n variable: kms:GrantIsForAWSResource\n values:\n - 'true'\n - sid: Allow Databricks to use KMS key for EBS\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${databricksCrossAccountRole}\n actions:\n - kms:Decrypt\n - kms:GenerateDataKey*\n - kms:CreateGrant\n - kms:DescribeKey\n resources:\n - '*'\n conditions:\n - test: ForAnyValue:StringLike\n variable: kms:ViaService\n values:\n - ec2.*.amazonaws.com\n```\n{{% /example %}}\n{{% /examples %}}\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* databricks.MwsCredentials to configure the cross-account role for creation of new workspaces within AWS.\n* databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n* databricks.MwsNetworks to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [workspaces 
in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported. ", + "description": "{{% examples %}}\n## Example Usage\n\n\u003e **Note** If you've used the resource before, please add `use_cases = [\"MANAGED_SERVICES\"]` to keep the previous behaviour.\n\n### Customer-managed key for managed services\n\nYou must configure this during workspace creation\n{{% example %}}\n### For AWS\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\nconst current = aws.getCallerIdentity({});\nconst databricksManagedServicesCmk = current.then(current =\u003e aws.iam.getPolicyDocument({\n version: \"2012-10-17\",\n statements: [\n {\n sid: \"Enable IAM User Permissions\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [current.accountId],\n }],\n actions: [\"kms:*\"],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS key for control plane managed services\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n actions: [\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n ],\n resources: [\"*\"],\n },\n ],\n}));\nconst managedServicesCustomerManagedKey = new aws.kms.Key(\"managedServicesCustomerManagedKey\", {policy: databricksManagedServicesCmk.then(databricksManagedServicesCmk =\u003e databricksManagedServicesCmk.json)});\nconst managedServicesCustomerManagedKeyAlias = new aws.kms.Alias(\"managedServicesCustomerManagedKeyAlias\", {targetKeyId: managedServicesCustomerManagedKey.keyId});\nconst managedServices = new databricks.MwsCustomerManagedKeys(\"managedServices\", {\n accountId: databricksAccountId,\n awsKeyInfo: {\n keyArn: managedServicesCustomerManagedKey.arn,\n keyAlias: managedServicesCustomerManagedKeyAlias.name,\n },\n useCases: [\"MANAGED_SERVICES\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\ncurrent = aws.get_caller_identity()\ndatabricks_managed_services_cmk = aws.iam.get_policy_document(version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Enable IAM User Permissions\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[current.account_id],\n )],\n actions=[\"kms:*\"],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for control plane managed services\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n ],\n resources=[\"*\"],\n ),\n ])\nmanaged_services_customer_managed_key = aws.kms.Key(\"managedServicesCustomerManagedKey\", policy=databricks_managed_services_cmk.json)\nmanaged_services_customer_managed_key_alias = aws.kms.Alias(\"managedServicesCustomerManagedKeyAlias\", target_key_id=managed_services_customer_managed_key.key_id)\nmanaged_services = databricks.MwsCustomerManagedKeys(\"managedServices\",\n account_id=databricks_account_id,\n 
aws_key_info=databricks.MwsCustomerManagedKeysAwsKeyInfoArgs(\n key_arn=managed_services_customer_managed_key.arn,\n key_alias=managed_services_customer_managed_key_alias.name,\n ),\n use_cases=[\"MANAGED_SERVICES\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n var current = Aws.GetCallerIdentity.Invoke();\n\n var databricksManagedServicesCmk = Aws.Iam.GetPolicyDocument.Invoke(new()\n {\n Version = \"2012-10-17\",\n Statements = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Enable IAM User Permissions\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n current.Apply(getCallerIdentityResult =\u003e getCallerIdentityResult.AccountId),\n },\n },\n },\n Actions = new[]\n {\n \"kms:*\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for control plane managed services\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n },\n });\n\n var managedServicesCustomerManagedKey = new Aws.Kms.Key(\"managedServicesCustomerManagedKey\", new()\n {\n Policy = databricksManagedServicesCmk.Apply(getPolicyDocumentResult =\u003e getPolicyDocumentResult.Json),\n });\n\n var managedServicesCustomerManagedKeyAlias = new Aws.Kms.Alias(\"managedServicesCustomerManagedKeyAlias\", new()\n {\n TargetKeyId = managedServicesCustomerManagedKey.KeyId,\n });\n\n var managedServices = new Databricks.MwsCustomerManagedKeys(\"managedServices\", new()\n {\n AccountId = databricksAccountId,\n AwsKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysAwsKeyInfoArgs\n {\n KeyArn = managedServicesCustomerManagedKey.Arn,\n KeyAlias = managedServicesCustomerManagedKeyAlias.Name,\n },\n UseCases = new[]\n {\n \"MANAGED_SERVICES\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws/sdk/v5/go/aws\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kms\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\nfunc main() {\npulumi.Run(func(ctx *pulumi.Context) error {\ncfg := config.New(ctx, \"\")\ndatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\ncurrent, err := aws.GetCallerIdentity(ctx, nil, nil);\nif err != nil {\nreturn err\n}\ndatabricksManagedServicesCmk, err := iam.GetPolicyDocument(ctx, \u0026iam.GetPolicyDocumentArgs{\nVersion: pulumi.StringRef(\"2012-10-17\"),\nStatements: []iam.GetPolicyDocumentStatement{\n{\nSid: pulumi.StringRef(\"Enable IAM User Permissions\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ncurrent.AccountId,\n},\n},\n},\nActions: []string{\n\"kms:*\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: 
pulumi.StringRef(\"Allow Databricks to use KMS key for control plane managed services\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:Encrypt\",\n\"kms:Decrypt\",\n},\nResources: []string{\n\"*\",\n},\n},\n},\n}, nil);\nif err != nil {\nreturn err\n}\nmanagedServicesCustomerManagedKey, err := kms.NewKey(ctx, \"managedServicesCustomerManagedKey\", \u0026kms.KeyArgs{\nPolicy: *pulumi.String(databricksManagedServicesCmk.Json),\n})\nif err != nil {\nreturn err\n}\nmanagedServicesCustomerManagedKeyAlias, err := kms.NewAlias(ctx, \"managedServicesCustomerManagedKeyAlias\", \u0026kms.AliasArgs{\nTargetKeyId: managedServicesCustomerManagedKey.KeyId,\n})\nif err != nil {\nreturn err\n}\n_, err = databricks.NewMwsCustomerManagedKeys(ctx, \"managedServices\", \u0026databricks.MwsCustomerManagedKeysArgs{\nAccountId: pulumi.Any(databricksAccountId),\nAwsKeyInfo: \u0026databricks.MwsCustomerManagedKeysAwsKeyInfoArgs{\nKeyArn: managedServicesCustomerManagedKey.Arn,\nKeyAlias: managedServicesCustomerManagedKeyAlias.Name,\n},\nUseCases: pulumi.StringArray{\npulumi.String(\"MANAGED_SERVICES\"),\n},\n})\nif err != nil {\nreturn err\n}\nreturn nil\n})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.aws.AwsFunctions;\nimport com.pulumi.aws.iam.IamFunctions;\nimport com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;\nimport com.pulumi.aws.kms.Key;\nimport com.pulumi.aws.kms.KeyArgs;\nimport com.pulumi.aws.kms.Alias;\nimport com.pulumi.aws.kms.AliasArgs;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysAwsKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var current = AwsFunctions.getCallerIdentity();\n\n final var databricksManagedServicesCmk = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()\n .version(\"2012-10-17\")\n .statements( \n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Enable IAM User Permissions\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(current.applyValue(getCallerIdentityResult -\u003e getCallerIdentityResult.accountId()))\n .build())\n .actions(\"kms:*\")\n .resources(\"*\")\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for control plane managed services\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:Encrypt\",\n \"kms:Decrypt\")\n .resources(\"*\")\n .build())\n .build());\n\n var managedServicesCustomerManagedKey = new Key(\"managedServicesCustomerManagedKey\", KeyArgs.builder() \n .policy(databricksManagedServicesCmk.applyValue(getPolicyDocumentResult -\u003e getPolicyDocumentResult.json()))\n .build());\n\n var managedServicesCustomerManagedKeyAlias = new 
Alias(\"managedServicesCustomerManagedKeyAlias\", AliasArgs.builder() \n .targetKeyId(managedServicesCustomerManagedKey.keyId())\n .build());\n\n var managedServices = new MwsCustomerManagedKeys(\"managedServices\", MwsCustomerManagedKeysArgs.builder() \n .accountId(databricksAccountId)\n .awsKeyInfo(MwsCustomerManagedKeysAwsKeyInfoArgs.builder()\n .keyArn(managedServicesCustomerManagedKey.arn())\n .keyAlias(managedServicesCustomerManagedKeyAlias.name())\n .build())\n .useCases(\"MANAGED_SERVICES\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\nresources:\n managedServicesCustomerManagedKey:\n type: aws:kms:Key\n properties:\n policy: ${databricksManagedServicesCmk.json}\n managedServicesCustomerManagedKeyAlias:\n type: aws:kms:Alias\n properties:\n targetKeyId: ${managedServicesCustomerManagedKey.keyId}\n managedServices:\n type: databricks:MwsCustomerManagedKeys\n properties:\n accountId: ${databricksAccountId}\n awsKeyInfo:\n keyArn: ${managedServicesCustomerManagedKey.arn}\n keyAlias: ${managedServicesCustomerManagedKeyAlias.name}\n useCases:\n - MANAGED_SERVICES\nvariables:\n current:\n fn::invoke:\n Function: aws:getCallerIdentity\n Arguments: {}\n databricksManagedServicesCmk:\n fn::invoke:\n Function: aws:iam:getPolicyDocument\n Arguments:\n version: 2012-10-17\n statements:\n - sid: Enable IAM User Permissions\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${current.accountId}\n actions:\n - kms:*\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for control plane managed services\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:Encrypt\n - kms:Decrypt\n resources:\n - '*'\n```\n{{% /example %}}\n{{% example %}}\n### For GCP\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\nconst cmekResourceId = config.requireObject(\"cmekResourceId\");\nconst managedServices = new databricks.MwsCustomerManagedKeys(\"managedServices\", {\n accountId: databricksAccountId,\n gcpKeyInfo: {\n kmsKeyId: cmekResourceId,\n },\n useCases: [\"MANAGED_SERVICES\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\ncmek_resource_id = config.require_object(\"cmekResourceId\")\nmanaged_services = databricks.MwsCustomerManagedKeys(\"managedServices\",\n account_id=databricks_account_id,\n gcp_key_info=databricks.MwsCustomerManagedKeysGcpKeyInfoArgs(\n kms_key_id=cmek_resource_id,\n ),\n use_cases=[\"MANAGED_SERVICES\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n var cmekResourceId = config.RequireObject\u003cdynamic\u003e(\"cmekResourceId\");\n var managedServices = new Databricks.MwsCustomerManagedKeys(\"managedServices\", new()\n {\n AccountId = databricksAccountId,\n GcpKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysGcpKeyInfoArgs\n {\n KmsKeyId = cmekResourceId,\n },\n UseCases = new[]\n {\n \"MANAGED_SERVICES\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\tdatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\n\t\tcmekResourceId := cfg.RequireObject(\"cmekResourceId\")\n\t\t_, err := databricks.NewMwsCustomerManagedKeys(ctx, \"managedServices\", \u0026databricks.MwsCustomerManagedKeysArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tGcpKeyInfo: \u0026databricks.MwsCustomerManagedKeysGcpKeyInfoArgs{\n\t\t\t\tKmsKeyId: pulumi.Any(cmekResourceId),\n\t\t\t},\n\t\t\tUseCases: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"MANAGED_SERVICES\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysGcpKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var cmekResourceId = config.get(\"cmekResourceId\");\n var managedServices = new MwsCustomerManagedKeys(\"managedServices\", MwsCustomerManagedKeysArgs.builder() \n .accountId(databricksAccountId)\n .gcpKeyInfo(MwsCustomerManagedKeysGcpKeyInfoArgs.builder()\n .kmsKeyId(cmekResourceId)\n .build())\n .useCases(\"MANAGED_SERVICES\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\n cmekResourceId:\n type: dynamic\nresources:\n managedServices:\n type: databricks:MwsCustomerManagedKeys\n properties:\n accountId: ${databricksAccountId}\n gcpKeyInfo:\n kmsKeyId: ${cmekResourceId}\n useCases:\n - MANAGED_SERVICES\n```\n{{% /example %}}\n### Customer-managed key for workspace storage\n{{% example %}}\n### For AWS\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws from \"@pulumi/aws\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\nconst databricksCrossAccountRole = config.requireObject(\"databricksCrossAccountRole\");\nconst databricksStorageCmk = aws.iam.getPolicyDocument({\n version: \"2012-10-17\",\n statements: [\n {\n sid: \"Enable IAM User Permissions\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [data.aws_caller_identity.current.account_id],\n }],\n actions: [\"kms:*\"],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS key for DBFS\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n actions: [\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n ],\n resources: [\"*\"],\n },\n {\n sid: \"Allow Databricks to use KMS key for DBFS (Grants)\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [\"arn:aws:iam::414351767826:root\"],\n }],\n 
actions: [\n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\",\n ],\n resources: [\"*\"],\n conditions: [{\n test: \"Bool\",\n variable: \"kms:GrantIsForAWSResource\",\n values: [\"true\"],\n }],\n },\n {\n sid: \"Allow Databricks to use KMS key for EBS\",\n effect: \"Allow\",\n principals: [{\n type: \"AWS\",\n identifiers: [databricksCrossAccountRole],\n }],\n actions: [\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n ],\n resources: [\"*\"],\n conditions: [{\n test: \"ForAnyValue:StringLike\",\n variable: \"kms:ViaService\",\n values: [\"ec2.*.amazonaws.com\"],\n }],\n },\n ],\n});\nconst storageCustomerManagedKey = new aws.kms.Key(\"storageCustomerManagedKey\", {policy: databricksStorageCmk.then(databricksStorageCmk =\u003e databricksStorageCmk.json)});\nconst storageCustomerManagedKeyAlias = new aws.kms.Alias(\"storageCustomerManagedKeyAlias\", {targetKeyId: storageCustomerManagedKey.keyId});\nconst storage = new databricks.MwsCustomerManagedKeys(\"storage\", {\n accountId: databricksAccountId,\n awsKeyInfo: {\n keyArn: storageCustomerManagedKey.arn,\n keyAlias: storageCustomerManagedKeyAlias.name,\n },\n useCases: [\"STORAGE\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_aws as aws\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\ndatabricks_cross_account_role = config.require_object(\"databricksCrossAccountRole\")\ndatabricks_storage_cmk = aws.iam.get_policy_document(version=\"2012-10-17\",\n statements=[\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Enable IAM User Permissions\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[data[\"aws_caller_identity\"][\"current\"][\"account_id\"]],\n )],\n actions=[\"kms:*\"],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for DBFS\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n ],\n resources=[\"*\"],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for DBFS (Grants)\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[\"arn:aws:iam::414351767826:root\"],\n )],\n actions=[\n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\",\n ],\n resources=[\"*\"],\n conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(\n test=\"Bool\",\n variable=\"kms:GrantIsForAWSResource\",\n values=[\"true\"],\n )],\n ),\n aws.iam.GetPolicyDocumentStatementArgs(\n sid=\"Allow Databricks to use KMS key for EBS\",\n effect=\"Allow\",\n principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(\n type=\"AWS\",\n identifiers=[databricks_cross_account_role],\n )],\n actions=[\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n ],\n resources=[\"*\"],\n conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(\n test=\"ForAnyValue:StringLike\",\n variable=\"kms:ViaService\",\n values=[\"ec2.*.amazonaws.com\"],\n )],\n ),\n ])\nstorage_customer_managed_key = aws.kms.Key(\"storageCustomerManagedKey\", policy=databricks_storage_cmk.json)\nstorage_customer_managed_key_alias = 
aws.kms.Alias(\"storageCustomerManagedKeyAlias\", target_key_id=storage_customer_managed_key.key_id)\nstorage = databricks.MwsCustomerManagedKeys(\"storage\",\n account_id=databricks_account_id,\n aws_key_info=databricks.MwsCustomerManagedKeysAwsKeyInfoArgs(\n key_arn=storage_customer_managed_key.arn,\n key_alias=storage_customer_managed_key_alias.name,\n ),\n use_cases=[\"STORAGE\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Aws = Pulumi.Aws;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n var databricksCrossAccountRole = config.RequireObject\u003cdynamic\u003e(\"databricksCrossAccountRole\");\n var databricksStorageCmk = Aws.Iam.GetPolicyDocument.Invoke(new()\n {\n Version = \"2012-10-17\",\n Statements = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Enable IAM User Permissions\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n data.Aws_caller_identity.Current.Account_id,\n },\n },\n },\n Actions = new[]\n {\n \"kms:*\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for DBFS\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\",\n },\n Resources = new[]\n {\n \"*\",\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for DBFS (Grants)\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n \"arn:aws:iam::414351767826:root\",\n },\n },\n },\n Actions = new[]\n {\n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\",\n },\n Resources = new[]\n {\n \"*\",\n },\n Conditions = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementConditionInputArgs\n {\n Test = \"Bool\",\n Variable = \"kms:GrantIsForAWSResource\",\n Values = new[]\n {\n \"true\",\n },\n },\n },\n },\n new Aws.Iam.Inputs.GetPolicyDocumentStatementInputArgs\n {\n Sid = \"Allow Databricks to use KMS key for EBS\",\n Effect = \"Allow\",\n Principals = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementPrincipalInputArgs\n {\n Type = \"AWS\",\n Identifiers = new[]\n {\n databricksCrossAccountRole,\n },\n },\n },\n Actions = new[]\n {\n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\",\n },\n Resources = new[]\n {\n \"*\",\n },\n Conditions = new[]\n {\n new Aws.Iam.Inputs.GetPolicyDocumentStatementConditionInputArgs\n {\n Test = \"ForAnyValue:StringLike\",\n Variable = \"kms:ViaService\",\n Values = new[]\n {\n \"ec2.*.amazonaws.com\",\n },\n },\n },\n },\n },\n });\n\n var storageCustomerManagedKey = new Aws.Kms.Key(\"storageCustomerManagedKey\", new()\n {\n Policy = databricksStorageCmk.Apply(getPolicyDocumentResult =\u003e getPolicyDocumentResult.Json),\n });\n\n var storageCustomerManagedKeyAlias = new 
Aws.Kms.Alias(\"storageCustomerManagedKeyAlias\", new()\n {\n TargetKeyId = storageCustomerManagedKey.KeyId,\n });\n\n var storage = new Databricks.MwsCustomerManagedKeys(\"storage\", new()\n {\n AccountId = databricksAccountId,\n AwsKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysAwsKeyInfoArgs\n {\n KeyArn = storageCustomerManagedKey.Arn,\n KeyAlias = storageCustomerManagedKeyAlias.Name,\n },\n UseCases = new[]\n {\n \"STORAGE\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/iam\"\n\t\"github.com/pulumi/pulumi-aws/sdk/v5/go/aws/kms\"\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\nfunc main() {\npulumi.Run(func(ctx *pulumi.Context) error {\ncfg := config.New(ctx, \"\")\ndatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\ndatabricksCrossAccountRole := cfg.RequireObject(\"databricksCrossAccountRole\")\ndatabricksStorageCmk, err := iam.GetPolicyDocument(ctx, \u0026iam.GetPolicyDocumentArgs{\nVersion: pulumi.StringRef(\"2012-10-17\"),\nStatements: []iam.GetPolicyDocumentStatement{\n{\nSid: pulumi.StringRef(\"Enable IAM User Permissions\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ndata.Aws_caller_identity.Current.Account_id,\n},\n},\n},\nActions: []string{\n\"kms:*\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for DBFS\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:Encrypt\",\n\"kms:Decrypt\",\n\"kms:ReEncrypt*\",\n\"kms:GenerateDataKey*\",\n\"kms:DescribeKey\",\n},\nResources: []string{\n\"*\",\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for DBFS (Grants)\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: []string{\n\"arn:aws:iam::414351767826:root\",\n},\n},\n},\nActions: []string{\n\"kms:CreateGrant\",\n\"kms:ListGrants\",\n\"kms:RevokeGrant\",\n},\nResources: []string{\n\"*\",\n},\nConditions: []iam.GetPolicyDocumentStatementCondition{\n{\nTest: \"Bool\",\nVariable: \"kms:GrantIsForAWSResource\",\nValues: []string{\n\"true\",\n},\n},\n},\n},\n{\nSid: pulumi.StringRef(\"Allow Databricks to use KMS key for EBS\"),\nEffect: pulumi.StringRef(\"Allow\"),\nPrincipals: []iam.GetPolicyDocumentStatementPrincipal{\n{\nType: \"AWS\",\nIdentifiers: interface{}{\ndatabricksCrossAccountRole,\n},\n},\n},\nActions: []string{\n\"kms:Decrypt\",\n\"kms:GenerateDataKey*\",\n\"kms:CreateGrant\",\n\"kms:DescribeKey\",\n},\nResources: []string{\n\"*\",\n},\nConditions: []iam.GetPolicyDocumentStatementCondition{\n{\nTest: \"ForAnyValue:StringLike\",\nVariable: \"kms:ViaService\",\nValues: []string{\n\"ec2.*.amazonaws.com\",\n},\n},\n},\n},\n},\n}, nil);\nif err != nil {\nreturn err\n}\nstorageCustomerManagedKey, err := kms.NewKey(ctx, \"storageCustomerManagedKey\", \u0026kms.KeyArgs{\nPolicy: *pulumi.String(databricksStorageCmk.Json),\n})\nif err != nil {\nreturn err\n}\nstorageCustomerManagedKeyAlias, err := kms.NewAlias(ctx, \"storageCustomerManagedKeyAlias\", \u0026kms.AliasArgs{\nTargetKeyId: storageCustomerManagedKey.KeyId,\n})\nif err != nil {\nreturn err\n}\n_, err = 
databricks.NewMwsCustomerManagedKeys(ctx, \"storage\", \u0026databricks.MwsCustomerManagedKeysArgs{\nAccountId: pulumi.Any(databricksAccountId),\nAwsKeyInfo: \u0026databricks.MwsCustomerManagedKeysAwsKeyInfoArgs{\nKeyArn: storageCustomerManagedKey.Arn,\nKeyAlias: storageCustomerManagedKeyAlias.Name,\n},\nUseCases: pulumi.StringArray{\npulumi.String(\"STORAGE\"),\n},\n})\nif err != nil {\nreturn err\n}\nreturn nil\n})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.aws.iam.IamFunctions;\nimport com.pulumi.aws.iam.inputs.GetPolicyDocumentArgs;\nimport com.pulumi.aws.kms.Key;\nimport com.pulumi.aws.kms.KeyArgs;\nimport com.pulumi.aws.kms.Alias;\nimport com.pulumi.aws.kms.AliasArgs;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysAwsKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var databricksCrossAccountRole = config.get(\"databricksCrossAccountRole\");\n final var databricksStorageCmk = IamFunctions.getPolicyDocument(GetPolicyDocumentArgs.builder()\n .version(\"2012-10-17\")\n .statements( \n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Enable IAM User Permissions\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(data.aws_caller_identity().current().account_id())\n .build())\n .actions(\"kms:*\")\n .resources(\"*\")\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for DBFS\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:Encrypt\",\n \"kms:Decrypt\",\n \"kms:ReEncrypt*\",\n \"kms:GenerateDataKey*\",\n \"kms:DescribeKey\")\n .resources(\"*\")\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for DBFS (Grants)\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(\"arn:aws:iam::414351767826:root\")\n .build())\n .actions( \n \"kms:CreateGrant\",\n \"kms:ListGrants\",\n \"kms:RevokeGrant\")\n .resources(\"*\")\n .conditions(GetPolicyDocumentStatementConditionArgs.builder()\n .test(\"Bool\")\n .variable(\"kms:GrantIsForAWSResource\")\n .values(\"true\")\n .build())\n .build(),\n GetPolicyDocumentStatementArgs.builder()\n .sid(\"Allow Databricks to use KMS key for EBS\")\n .effect(\"Allow\")\n .principals(GetPolicyDocumentStatementPrincipalArgs.builder()\n .type(\"AWS\")\n .identifiers(databricksCrossAccountRole)\n .build())\n .actions( \n \"kms:Decrypt\",\n \"kms:GenerateDataKey*\",\n \"kms:CreateGrant\",\n \"kms:DescribeKey\")\n .resources(\"*\")\n .conditions(GetPolicyDocumentStatementConditionArgs.builder()\n .test(\"ForAnyValue:StringLike\")\n .variable(\"kms:ViaService\")\n .values(\"ec2.*.amazonaws.com\")\n .build())\n .build())\n .build());\n\n var storageCustomerManagedKey = new Key(\"storageCustomerManagedKey\", 
KeyArgs.builder() \n .policy(databricksStorageCmk.applyValue(getPolicyDocumentResult -\u003e getPolicyDocumentResult.json()))\n .build());\n\n var storageCustomerManagedKeyAlias = new Alias(\"storageCustomerManagedKeyAlias\", AliasArgs.builder() \n .targetKeyId(storageCustomerManagedKey.keyId())\n .build());\n\n var storage = new MwsCustomerManagedKeys(\"storage\", MwsCustomerManagedKeysArgs.builder() \n .accountId(databricksAccountId)\n .awsKeyInfo(MwsCustomerManagedKeysAwsKeyInfoArgs.builder()\n .keyArn(storageCustomerManagedKey.arn())\n .keyAlias(storageCustomerManagedKeyAlias.name())\n .build())\n .useCases(\"STORAGE\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\n databricksCrossAccountRole:\n type: dynamic\nresources:\n storageCustomerManagedKey:\n type: aws:kms:Key\n properties:\n policy: ${databricksStorageCmk.json}\n storageCustomerManagedKeyAlias:\n type: aws:kms:Alias\n properties:\n targetKeyId: ${storageCustomerManagedKey.keyId}\n storage:\n type: databricks:MwsCustomerManagedKeys\n properties:\n accountId: ${databricksAccountId}\n awsKeyInfo:\n keyArn: ${storageCustomerManagedKey.arn}\n keyAlias: ${storageCustomerManagedKeyAlias.name}\n useCases:\n - STORAGE\nvariables:\n databricksStorageCmk:\n fn::invoke:\n Function: aws:iam:getPolicyDocument\n Arguments:\n version: 2012-10-17\n statements:\n - sid: Enable IAM User Permissions\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${data.aws_caller_identity.current.account_id}\n actions:\n - kms:*\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for DBFS\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:Encrypt\n - kms:Decrypt\n - kms:ReEncrypt*\n - kms:GenerateDataKey*\n - kms:DescribeKey\n resources:\n - '*'\n - sid: Allow Databricks to use KMS key for DBFS (Grants)\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - arn:aws:iam::414351767826:root\n actions:\n - kms:CreateGrant\n - kms:ListGrants\n - kms:RevokeGrant\n resources:\n - '*'\n conditions:\n - test: Bool\n variable: kms:GrantIsForAWSResource\n values:\n - 'true'\n - sid: Allow Databricks to use KMS key for EBS\n effect: Allow\n principals:\n - type: AWS\n identifiers:\n - ${databricksCrossAccountRole}\n actions:\n - kms:Decrypt\n - kms:GenerateDataKey*\n - kms:CreateGrant\n - kms:DescribeKey\n resources:\n - '*'\n conditions:\n - test: ForAnyValue:StringLike\n variable: kms:ViaService\n values:\n - ec2.*.amazonaws.com\n```\n{{% /example %}}\n{{% example %}}\n### For GCP\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst config = new pulumi.Config();\nconst databricksAccountId = config.requireObject(\"databricksAccountId\");\nconst cmekResourceId = config.requireObject(\"cmekResourceId\");\nconst storage = new databricks.MwsCustomerManagedKeys(\"storage\", {\n accountId: databricksAccountId,\n gcpKeyInfo: {\n kmsKeyId: cmekResourceId,\n },\n useCases: [\"STORAGE\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nconfig = pulumi.Config()\ndatabricks_account_id = config.require_object(\"databricksAccountId\")\ncmek_resource_id = config.require_object(\"cmekResourceId\")\nstorage = databricks.MwsCustomerManagedKeys(\"storage\",\n account_id=databricks_account_id,\n gcp_key_info=databricks.MwsCustomerManagedKeysGcpKeyInfoArgs(\n kms_key_id=cmek_resource_id,\n ),\n use_cases=[\"STORAGE\"])\n```\n```csharp\nusing 
System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var databricksAccountId = config.RequireObject\u003cdynamic\u003e(\"databricksAccountId\");\n var cmekResourceId = config.RequireObject\u003cdynamic\u003e(\"cmekResourceId\");\n var storage = new Databricks.MwsCustomerManagedKeys(\"storage\", new()\n {\n AccountId = databricksAccountId,\n GcpKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysGcpKeyInfoArgs\n {\n KmsKeyId = cmekResourceId,\n },\n UseCases = new[]\n {\n \"STORAGE\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\tdatabricksAccountId := cfg.RequireObject(\"databricksAccountId\")\n\t\tcmekResourceId := cfg.RequireObject(\"cmekResourceId\")\n\t\t_, err := databricks.NewMwsCustomerManagedKeys(ctx, \"storage\", \u0026databricks.MwsCustomerManagedKeysArgs{\n\t\t\tAccountId: pulumi.Any(databricksAccountId),\n\t\t\tGcpKeyInfo: \u0026databricks.MwsCustomerManagedKeysGcpKeyInfoArgs{\n\t\t\t\tKmsKeyId: pulumi.Any(cmekResourceId),\n\t\t\t},\n\t\t\tUseCases: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"STORAGE\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.MwsCustomerManagedKeys;\nimport com.pulumi.databricks.MwsCustomerManagedKeysArgs;\nimport com.pulumi.databricks.inputs.MwsCustomerManagedKeysGcpKeyInfoArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var config = ctx.config();\n final var databricksAccountId = config.get(\"databricksAccountId\");\n final var cmekResourceId = config.get(\"cmekResourceId\");\n var storage = new MwsCustomerManagedKeys(\"storage\", MwsCustomerManagedKeysArgs.builder() \n .accountId(databricksAccountId)\n .gcpKeyInfo(MwsCustomerManagedKeysGcpKeyInfoArgs.builder()\n .kmsKeyId(cmekResourceId)\n .build())\n .useCases(\"STORAGE\")\n .build());\n\n }\n}\n```\n```yaml\nconfiguration:\n databricksAccountId:\n type: dynamic\n cmekResourceId:\n type: dynamic\nresources:\n storage:\n type: databricks:MwsCustomerManagedKeys\n properties:\n accountId: ${databricksAccountId}\n gcpKeyInfo:\n kmsKeyId: ${cmekResourceId}\n useCases:\n - STORAGE\n```\n{{% /example %}}\n{{% /examples %}}\n## Related Resources\n\nThe following resources are used in the same context:\n\n* Provisioning Databricks on AWS guide.\n* databricks.MwsCredentials to configure the cross-account role for creation of new workspaces within AWS.\n* databricks.MwsLogDelivery to configure delivery of [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).\n* databricks.MwsNetworks to [configure 
VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) \u0026 subnets for new workspaces within AWS.\n* databricks.MwsStorageConfigurations to configure root bucket new workspaces within AWS.\n* databricks.MwsWorkspaces to set up [workspaces in E2 architecture on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1).\n\n\n## Import\n\n-\u003e **Note** Importing this resource is not currently supported. ", "properties": { "accountId": { "type": "string", @@ -12893,7 +13120,7 @@ }, "awsKeyInfo": { "$ref": "#/types/databricks:index/MwsCustomerManagedKeysAwsKeyInfo:MwsCustomerManagedKeysAwsKeyInfo", - "description": "This field is a block and is documented below.\n" + "description": "This field is a block and is documented below. This conflicts with `gcp_key_info`\n" }, "creationTime": { "type": "integer", @@ -12904,7 +13131,8 @@ "description": "(String) ID of the encryption key configuration object.\n" }, "gcpKeyInfo": { - "$ref": "#/types/databricks:index/MwsCustomerManagedKeysGcpKeyInfo:MwsCustomerManagedKeysGcpKeyInfo" + "$ref": "#/types/databricks:index/MwsCustomerManagedKeysGcpKeyInfo:MwsCustomerManagedKeysGcpKeyInfo", + "description": "This field is a block and is documented below. This conflicts with `aws_key_info`\n" }, "useCases": { "type": "array", @@ -12928,7 +13156,7 @@ }, "awsKeyInfo": { "$ref": "#/types/databricks:index/MwsCustomerManagedKeysAwsKeyInfo:MwsCustomerManagedKeysAwsKeyInfo", - "description": "This field is a block and is documented below.\n", + "description": "This field is a block and is documented below. This conflicts with `gcp_key_info`\n", "willReplaceOnChanges": true }, "creationTime": { @@ -12941,6 +13169,7 @@ }, "gcpKeyInfo": { "$ref": "#/types/databricks:index/MwsCustomerManagedKeysGcpKeyInfo:MwsCustomerManagedKeysGcpKeyInfo", + "description": "This field is a block and is documented below. This conflicts with `aws_key_info`\n", "willReplaceOnChanges": true }, "useCases": { @@ -12966,7 +13195,7 @@ }, "awsKeyInfo": { "$ref": "#/types/databricks:index/MwsCustomerManagedKeysAwsKeyInfo:MwsCustomerManagedKeysAwsKeyInfo", - "description": "This field is a block and is documented below.\n", + "description": "This field is a block and is documented below. This conflicts with `gcp_key_info`\n", "willReplaceOnChanges": true }, "creationTime": { @@ -12979,6 +13208,7 @@ }, "gcpKeyInfo": { "$ref": "#/types/databricks:index/MwsCustomerManagedKeysGcpKeyInfo:MwsCustomerManagedKeysGcpKeyInfo", + "description": "This field is a block and is documented below. This conflicts with `aws_key_info`\n", "willReplaceOnChanges": true }, "useCases": { @@ -13898,7 +14128,8 @@ "description": "`storage_configuration_id` from storage configuration.\n" }, "storageCustomerManagedKeyId": { - "type": "string" + "type": "string", + "description": "`customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage \u0026 Cluster Volumes.\n" }, "token": { "$ref": "#/types/databricks:index/MwsWorkspacesToken:MwsWorkspacesToken" @@ -14015,7 +14246,8 @@ "willReplaceOnChanges": true }, "storageCustomerManagedKeyId": { - "type": "string" + "type": "string", + "description": "`customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. 
This is used to encrypt the DBFS Storage \u0026 Cluster Volumes.\n" }, "token": { "$ref": "#/types/databricks:index/MwsWorkspacesToken:MwsWorkspacesToken" @@ -14128,7 +14360,8 @@ "willReplaceOnChanges": true }, "storageCustomerManagedKeyId": { - "type": "string" + "type": "string", + "description": "`customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage \u0026 Cluster Volumes.\n" }, "token": { "$ref": "#/types/databricks:index/MwsWorkspacesToken:MwsWorkspacesToken" @@ -15886,6 +16119,10 @@ "items": { "$ref": "#/types/databricks:index/ShareObject:ShareObject" } + }, + "owner": { + "type": "string", + "description": "User name/group name/sp application_id of the share owner.\n" } }, "required": [ @@ -15912,6 +16149,10 @@ "items": { "$ref": "#/types/databricks:index/ShareObject:ShareObject" } + }, + "owner": { + "type": "string", + "description": "User name/group name/sp application_id of the share owner.\n" } }, "stateInputs": { @@ -15935,6 +16176,10 @@ "items": { "$ref": "#/types/databricks:index/ShareObject:ShareObject" } + }, + "owner": { + "type": "string", + "description": "User name/group name/sp application_id of the share owner.\n" } }, "type": "object" @@ -15943,6 +16188,9 @@ "databricks:index/sqlAlert:SqlAlert": { "description": "This resource allows you to manage [Databricks SQL Alerts](https://docs.databricks.com/sql/user/queries/index.html).\n\n**Note:** To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your databricks.Group or databricks_user.\n\n## Related Resources\n\nThe following resources are often used in the same context:\n\n* End to end workspace management guide.\n* databricks.SqlQuery to manage Databricks SQL [Queries](https://docs.databricks.com/sql/user/queries/index.html).\n* databricks.SqlEndpoint to manage Databricks SQL [Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html).\n* databricks.Directory to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html).\n", "properties": { + "createdAt": { + "type": "string" + }, "name": { "type": "string", "description": "Name of the alert.\n" @@ -15962,14 +16210,22 @@ "rearm": { "type": "integer", "description": "Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again.\n" + }, + "updatedAt": { + "type": "string" } }, "required": [ + "createdAt", "name", "options", - "queryId" + "queryId", + "updatedAt" ], "inputProperties": { + "createdAt": { + "type": "string" + }, "name": { "type": "string", "description": "Name of the alert.\n" @@ -15990,6 +16246,9 @@ "rearm": { "type": "integer", "description": "Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again.\n" + }, + "updatedAt": { + "type": "string" } }, "requiredInputs": [ @@ -15999,6 +16258,9 @@ "stateInputs": { "description": "Input properties used for looking up and filtering SqlAlert resources.\n", "properties": { + "createdAt": { + "type": "string" + }, "name": { "type": "string", "description": "Name of the alert.\n" @@ -16019,6 +16281,9 @@ "rearm": { "type": "integer", "description": "Number of seconds after being triggered before the alert rearms itself and can be triggered again. 
If not defined, alert will never be triggered again.\n" + }, + "updatedAt": { + "type": "string" } }, "type": "object" @@ -16027,6 +16292,9 @@ "databricks:index/sqlDashboard:SqlDashboard": { "description": "This resource is used to manage [Databricks SQL Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your databricks.Group or databricks_user.\n\n**Note:** documentation for this resource is a work in progress.\n\nA dashboard may have one or more widgets.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst sharedDir = new databricks.Directory(\"sharedDir\", {path: \"/Shared/Dashboards\"});\nconst d1 = new databricks.SqlDashboard(\"d1\", {\n parent: pulumi.interpolate`folders/${sharedDir.objectId}`,\n tags: [\n \"some-tag\",\n \"another-tag\",\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nshared_dir = databricks.Directory(\"sharedDir\", path=\"/Shared/Dashboards\")\nd1 = databricks.SqlDashboard(\"d1\",\n parent=shared_dir.object_id.apply(lambda object_id: f\"folders/{object_id}\"),\n tags=[\n \"some-tag\",\n \"another-tag\",\n ])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var sharedDir = new Databricks.Directory(\"sharedDir\", new()\n {\n Path = \"/Shared/Dashboards\",\n });\n\n var d1 = new Databricks.SqlDashboard(\"d1\", new()\n {\n Parent = sharedDir.ObjectId.Apply(objectId =\u003e $\"folders/{objectId}\"),\n Tags = new[]\n {\n \"some-tag\",\n \"another-tag\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsharedDir, err := databricks.NewDirectory(ctx, \"sharedDir\", \u0026databricks.DirectoryArgs{\n\t\t\tPath: pulumi.String(\"/Shared/Dashboards\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewSqlDashboard(ctx, \"d1\", \u0026databricks.SqlDashboardArgs{\n\t\t\tParent: sharedDir.ObjectId.ApplyT(func(objectId int) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"folders/%v\", objectId), nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t\tTags: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"some-tag\"),\n\t\t\t\tpulumi.String(\"another-tag\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Directory;\nimport com.pulumi.databricks.DirectoryArgs;\nimport com.pulumi.databricks.SqlDashboard;\nimport com.pulumi.databricks.SqlDashboardArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var sharedDir = new Directory(\"sharedDir\", DirectoryArgs.builder() \n .path(\"/Shared/Dashboards\")\n .build());\n\n var d1 = new SqlDashboard(\"d1\", SqlDashboardArgs.builder() \n 
.parent(sharedDir.objectId().applyValue(objectId -\u003e String.format(\"folders/%s\", objectId)))\n .tags( \n \"some-tag\",\n \"another-tag\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n sharedDir:\n type: databricks:Directory\n properties:\n path: /Shared/Dashboards\n d1:\n type: databricks:SqlDashboard\n properties:\n parent: folders/${sharedDir.objectId}\n tags:\n - some-tag\n - another-tag\n```\n\nExample permission to share dashboard with all users:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst d1 = new databricks.Permissions(\"d1\", {\n sqlDashboardId: databricks_sql_dashboard.d1.id,\n accessControls: [{\n groupName: data.databricks_group.users.display_name,\n permissionLevel: \"CAN_RUN\",\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nd1 = databricks.Permissions(\"d1\",\n sql_dashboard_id=databricks_sql_dashboard[\"d1\"][\"id\"],\n access_controls=[databricks.PermissionsAccessControlArgs(\n group_name=data[\"databricks_group\"][\"users\"][\"display_name\"],\n permission_level=\"CAN_RUN\",\n )])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var d1 = new Databricks.Permissions(\"d1\", new()\n {\n SqlDashboardId = databricks_sql_dashboard.D1.Id,\n AccessControls = new[]\n {\n new Databricks.Inputs.PermissionsAccessControlArgs\n {\n GroupName = data.Databricks_group.Users.Display_name,\n PermissionLevel = \"CAN_RUN\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewPermissions(ctx, \"d1\", \u0026databricks.PermissionsArgs{\n\t\t\tSqlDashboardId: pulumi.Any(databricks_sql_dashboard.D1.Id),\n\t\t\tAccessControls: databricks.PermissionsAccessControlArray{\n\t\t\t\t\u0026databricks.PermissionsAccessControlArgs{\n\t\t\t\t\tGroupName: pulumi.Any(data.Databricks_group.Users.Display_name),\n\t\t\t\t\tPermissionLevel: pulumi.String(\"CAN_RUN\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Permissions;\nimport com.pulumi.databricks.PermissionsArgs;\nimport com.pulumi.databricks.inputs.PermissionsAccessControlArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var d1 = new Permissions(\"d1\", PermissionsArgs.builder() \n .sqlDashboardId(databricks_sql_dashboard.d1().id())\n .accessControls(PermissionsAccessControlArgs.builder()\n .groupName(data.databricks_group().users().display_name())\n .permissionLevel(\"CAN_RUN\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n d1:\n type: databricks:Permissions\n properties:\n sqlDashboardId: ${databricks_sql_dashboard.d1.id}\n accessControls:\n - groupName: ${data.databricks_group.users.display_name}\n permissionLevel: CAN_RUN\n```\n{{% /example %}}\n{{% /examples %}}\n## Related Resources\n\nThe following resources 
are often used in the same context:\n\n* End to end workspace management guide.\n* databricks.SqlEndpoint to manage Databricks SQL [Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html).\n* databricks.SqlGlobalConfig to configure the security policy, databricks_instance_profile, and [data access properties](https://docs.databricks.com/sql/admin/data-access-configuration.html) for all databricks.SqlEndpoint of workspace.\n* databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html).\n\n\n## Import\n\nYou can import a `databricks_sql_dashboard` resource with ID like the followingbash\n\n```sh\n $ pulumi import databricks:index/sqlDashboard:SqlDashboard this \u003cdashboard-id\u003e\n```\n\n ", "properties": { + "createdAt": { + "type": "string" + }, "name": { "type": "string" }, @@ -16038,12 +16306,20 @@ "items": { "type": "string" } + }, + "updatedAt": { + "type": "string" } }, "required": [ - "name" + "createdAt", + "name", + "updatedAt" ], "inputProperties": { + "createdAt": { + "type": "string" + }, "name": { "type": "string" }, @@ -16056,11 +16332,17 @@ "items": { "type": "string" } + }, + "updatedAt": { + "type": "string" } }, "stateInputs": { "description": "Input properties used for looking up and filtering SqlDashboard resources.\n", "properties": { + "createdAt": { + "type": "string" + }, "name": { "type": "string" }, @@ -16073,6 +16355,9 @@ "items": { "type": "string" } + }, + "updatedAt": { + "type": "string" } }, "type": "object" @@ -16530,6 +16815,9 @@ "databricks:index/sqlQuery:SqlQuery": { "description": "To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your databricks.Group or databricks_user.\n\n**Note:** documentation for this resource is a work in progress.\n\nA query may have one or more visualizations.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst sharedDir = new databricks.Directory(\"sharedDir\", {path: \"/Shared/Queries\"});\nconst q1 = new databricks.SqlQuery(\"q1\", {\n dataSourceId: databricks_sql_endpoint.example.data_source_id,\n query: ` SELECT {{ p1 }} AS p1\n WHERE 1=1\n AND p2 in ({{ p2 }})\n AND event_date \u003e date '{{ p3 }}'\n`,\n parent: pulumi.interpolate`folders/${sharedDir.objectId}`,\n runAsRole: \"viewer\",\n parameters: [\n {\n name: \"p1\",\n title: \"Title for p1\",\n text: {\n value: \"default\",\n },\n },\n {\n name: \"p2\",\n title: \"Title for p2\",\n \"enum\": {\n options: [\n \"default\",\n \"foo\",\n \"bar\",\n ],\n value: \"default\",\n multiple: {\n prefix: \"\\\"\",\n suffix: \"\\\"\",\n separator: \",\",\n },\n },\n },\n {\n name: \"p3\",\n title: \"Title for p3\",\n date: {\n value: \"2022-01-01\",\n },\n },\n ],\n tags: [\n \"t1\",\n \"t2\",\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nshared_dir = databricks.Directory(\"sharedDir\", path=\"/Shared/Queries\")\nq1 = databricks.SqlQuery(\"q1\",\n data_source_id=databricks_sql_endpoint[\"example\"][\"data_source_id\"],\n query=\"\"\" SELECT {{ p1 }} AS p1\n WHERE 1=1\n AND p2 in ({{ p2 }})\n AND event_date \u003e date '{{ p3 }}'\n\"\"\",\n parent=shared_dir.object_id.apply(lambda object_id: f\"folders/{object_id}\"),\n run_as_role=\"viewer\",\n 
parameters=[\n databricks.SqlQueryParameterArgs(\n name=\"p1\",\n title=\"Title for p1\",\n text=databricks.SqlQueryParameterTextArgs(\n value=\"default\",\n ),\n ),\n databricks.SqlQueryParameterArgs(\n name=\"p2\",\n title=\"Title for p2\",\n enum=databricks.SqlQueryParameterEnumArgs(\n options=[\n \"default\",\n \"foo\",\n \"bar\",\n ],\n value=\"default\",\n multiple=databricks.SqlQueryParameterEnumMultipleArgs(\n prefix=\"\\\"\",\n suffix=\"\\\"\",\n separator=\",\",\n ),\n ),\n ),\n databricks.SqlQueryParameterArgs(\n name=\"p3\",\n title=\"Title for p3\",\n date=databricks.SqlQueryParameterDateArgs(\n value=\"2022-01-01\",\n ),\n ),\n ],\n tags=[\n \"t1\",\n \"t2\",\n ])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var sharedDir = new Databricks.Directory(\"sharedDir\", new()\n {\n Path = \"/Shared/Queries\",\n });\n\n var q1 = new Databricks.SqlQuery(\"q1\", new()\n {\n DataSourceId = databricks_sql_endpoint.Example.Data_source_id,\n Query = @\" SELECT {{ p1 }} AS p1\n WHERE 1=1\n AND p2 in ({{ p2 }})\n AND event_date \u003e date '{{ p3 }}'\n\",\n Parent = sharedDir.ObjectId.Apply(objectId =\u003e $\"folders/{objectId}\"),\n RunAsRole = \"viewer\",\n Parameters = new[]\n {\n new Databricks.Inputs.SqlQueryParameterArgs\n {\n Name = \"p1\",\n Title = \"Title for p1\",\n Text = new Databricks.Inputs.SqlQueryParameterTextArgs\n {\n Value = \"default\",\n },\n },\n new Databricks.Inputs.SqlQueryParameterArgs\n {\n Name = \"p2\",\n Title = \"Title for p2\",\n Enum = new Databricks.Inputs.SqlQueryParameterEnumArgs\n {\n Options = new[]\n {\n \"default\",\n \"foo\",\n \"bar\",\n },\n Value = \"default\",\n Multiple = new Databricks.Inputs.SqlQueryParameterEnumMultipleArgs\n {\n Prefix = \"\\\"\",\n Suffix = \"\\\"\",\n Separator = \",\",\n },\n },\n },\n new Databricks.Inputs.SqlQueryParameterArgs\n {\n Name = \"p3\",\n Title = \"Title for p3\",\n Date = new Databricks.Inputs.SqlQueryParameterDateArgs\n {\n Value = \"2022-01-01\",\n },\n },\n },\n Tags = new[]\n {\n \"t1\",\n \"t2\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsharedDir, err := databricks.NewDirectory(ctx, \"sharedDir\", \u0026databricks.DirectoryArgs{\n\t\t\tPath: pulumi.String(\"/Shared/Queries\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databricks.NewSqlQuery(ctx, \"q1\", \u0026databricks.SqlQueryArgs{\n\t\t\tDataSourceId: pulumi.Any(databricks_sql_endpoint.Example.Data_source_id),\n\t\t\tQuery: pulumi.String(\" SELECT {{ p1 }} AS p1\\n WHERE 1=1\\n AND p2 in ({{ p2 }})\\n AND event_date \u003e date '{{ p3 }}'\\n\"),\n\t\t\tParent: sharedDir.ObjectId.ApplyT(func(objectId int) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"folders/%v\", objectId), nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t\tRunAsRole: pulumi.String(\"viewer\"),\n\t\t\tParameters: databricks.SqlQueryParameterArray{\n\t\t\t\t\u0026databricks.SqlQueryParameterArgs{\n\t\t\t\t\tName: pulumi.String(\"p1\"),\n\t\t\t\t\tTitle: pulumi.String(\"Title for p1\"),\n\t\t\t\t\tText: \u0026databricks.SqlQueryParameterTextArgs{\n\t\t\t\t\t\tValue: pulumi.String(\"default\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\u0026databricks.SqlQueryParameterArgs{\n\t\t\t\t\tName: pulumi.String(\"p2\"),\n\t\t\t\t\tTitle: 
pulumi.String(\"Title for p2\"),\n\t\t\t\t\tEnum: \u0026databricks.SqlQueryParameterEnumArgs{\n\t\t\t\t\t\tOptions: pulumi.StringArray{\n\t\t\t\t\t\t\tpulumi.String(\"default\"),\n\t\t\t\t\t\t\tpulumi.String(\"foo\"),\n\t\t\t\t\t\t\tpulumi.String(\"bar\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tValue: pulumi.String(\"default\"),\n\t\t\t\t\t\tMultiple: \u0026databricks.SqlQueryParameterEnumMultipleArgs{\n\t\t\t\t\t\t\tPrefix: pulumi.String(\"\\\"\"),\n\t\t\t\t\t\t\tSuffix: pulumi.String(\"\\\"\"),\n\t\t\t\t\t\t\tSeparator: pulumi.String(\",\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\u0026databricks.SqlQueryParameterArgs{\n\t\t\t\t\tName: pulumi.String(\"p3\"),\n\t\t\t\t\tTitle: pulumi.String(\"Title for p3\"),\n\t\t\t\t\tDate: \u0026databricks.SqlQueryParameterDateArgs{\n\t\t\t\t\t\tValue: pulumi.String(\"2022-01-01\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tTags: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"t1\"),\n\t\t\t\tpulumi.String(\"t2\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Directory;\nimport com.pulumi.databricks.DirectoryArgs;\nimport com.pulumi.databricks.SqlQuery;\nimport com.pulumi.databricks.SqlQueryArgs;\nimport com.pulumi.databricks.inputs.SqlQueryParameterArgs;\nimport com.pulumi.databricks.inputs.SqlQueryParameterTextArgs;\nimport com.pulumi.databricks.inputs.SqlQueryParameterEnumArgs;\nimport com.pulumi.databricks.inputs.SqlQueryParameterEnumMultipleArgs;\nimport com.pulumi.databricks.inputs.SqlQueryParameterDateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var sharedDir = new Directory(\"sharedDir\", DirectoryArgs.builder() \n .path(\"/Shared/Queries\")\n .build());\n\n var q1 = new SqlQuery(\"q1\", SqlQueryArgs.builder() \n .dataSourceId(databricks_sql_endpoint.example().data_source_id())\n .query(\"\"\"\n SELECT {{ p1 }} AS p1\n WHERE 1=1\n AND p2 in ({{ p2 }})\n AND event_date \u003e date '{{ p3 }}'\n \"\"\")\n .parent(sharedDir.objectId().applyValue(objectId -\u003e String.format(\"folders/%s\", objectId)))\n .runAsRole(\"viewer\")\n .parameters( \n SqlQueryParameterArgs.builder()\n .name(\"p1\")\n .title(\"Title for p1\")\n .text(SqlQueryParameterTextArgs.builder()\n .value(\"default\")\n .build())\n .build(),\n SqlQueryParameterArgs.builder()\n .name(\"p2\")\n .title(\"Title for p2\")\n .enum_(SqlQueryParameterEnumArgs.builder()\n .options( \n \"default\",\n \"foo\",\n \"bar\")\n .value(\"default\")\n .multiple(SqlQueryParameterEnumMultipleArgs.builder()\n .prefix(\"\\\"\")\n .suffix(\"\\\"\")\n .separator(\",\")\n .build())\n .build())\n .build(),\n SqlQueryParameterArgs.builder()\n .name(\"p3\")\n .title(\"Title for p3\")\n .date(SqlQueryParameterDateArgs.builder()\n .value(\"2022-01-01\")\n .build())\n .build())\n .tags( \n \"t1\",\n \"t2\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n sharedDir:\n type: databricks:Directory\n properties:\n path: /Shared/Queries\n q1:\n type: databricks:SqlQuery\n properties:\n dataSourceId: ${databricks_sql_endpoint.example.data_source_id}\n query: |2\n SELECT {{ p1 }} AS p1\n WHERE 1=1\n AND p2 in ({{ p2 }})\n AND event_date \u003e date '{{ p3 }}'\n 
parent: folders/${sharedDir.objectId}\n runAsRole: viewer\n parameters:\n - name: p1\n title: Title for p1\n text:\n value: default\n - name: p2\n title: Title for p2\n enum:\n options:\n - default\n - foo\n - bar\n value: default\n multiple:\n prefix: '\"'\n suffix: '\"'\n separator: ','\n - name: p3\n title: Title for p3\n date:\n value: 2022-01-01\n tags:\n - t1\n - t2\n```\n\nExample permission to share query with all users:\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as databricks from \"@pulumi/databricks\";\n\nconst q1 = new databricks.Permissions(\"q1\", {\n sqlQueryId: databricks_sql_query.q1.id,\n accessControls: [\n {\n groupName: data.databricks_group.users.display_name,\n permissionLevel: \"CAN_RUN\",\n },\n {\n groupName: data.databricks_group.team.display_name,\n permissionLevel: \"CAN_EDIT\",\n },\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_databricks as databricks\n\nq1 = databricks.Permissions(\"q1\",\n sql_query_id=databricks_sql_query[\"q1\"][\"id\"],\n access_controls=[\n databricks.PermissionsAccessControlArgs(\n group_name=data[\"databricks_group\"][\"users\"][\"display_name\"],\n permission_level=\"CAN_RUN\",\n ),\n databricks.PermissionsAccessControlArgs(\n group_name=data[\"databricks_group\"][\"team\"][\"display_name\"],\n permission_level=\"CAN_EDIT\",\n ),\n ])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Databricks = Pulumi.Databricks;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var q1 = new Databricks.Permissions(\"q1\", new()\n {\n SqlQueryId = databricks_sql_query.Q1.Id,\n AccessControls = new[]\n {\n new Databricks.Inputs.PermissionsAccessControlArgs\n {\n GroupName = data.Databricks_group.Users.Display_name,\n PermissionLevel = \"CAN_RUN\",\n },\n new Databricks.Inputs.PermissionsAccessControlArgs\n {\n GroupName = data.Databricks_group.Team.Display_name,\n PermissionLevel = \"CAN_EDIT\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-databricks/sdk/go/databricks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databricks.NewPermissions(ctx, \"q1\", \u0026databricks.PermissionsArgs{\n\t\t\tSqlQueryId: pulumi.Any(databricks_sql_query.Q1.Id),\n\t\t\tAccessControls: databricks.PermissionsAccessControlArray{\n\t\t\t\t\u0026databricks.PermissionsAccessControlArgs{\n\t\t\t\t\tGroupName: pulumi.Any(data.Databricks_group.Users.Display_name),\n\t\t\t\t\tPermissionLevel: pulumi.String(\"CAN_RUN\"),\n\t\t\t\t},\n\t\t\t\t\u0026databricks.PermissionsAccessControlArgs{\n\t\t\t\t\tGroupName: pulumi.Any(data.Databricks_group.Team.Display_name),\n\t\t\t\t\tPermissionLevel: pulumi.String(\"CAN_EDIT\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.databricks.Permissions;\nimport com.pulumi.databricks.PermissionsArgs;\nimport com.pulumi.databricks.inputs.PermissionsAccessControlArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var q1 = new Permissions(\"q1\", PermissionsArgs.builder() \n 
.sqlQueryId(databricks_sql_query.q1().id())\n .accessControls( \n PermissionsAccessControlArgs.builder()\n .groupName(data.databricks_group().users().display_name())\n .permissionLevel(\"CAN_RUN\")\n .build(),\n PermissionsAccessControlArgs.builder()\n .groupName(data.databricks_group().team().display_name())\n .permissionLevel(\"CAN_EDIT\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n q1:\n type: databricks:Permissions\n properties:\n sqlQueryId: ${databricks_sql_query.q1.id}\n accessControls:\n - groupName: ${data.databricks_group.users.display_name}\n permissionLevel: CAN_RUN\n - groupName: ${data.databricks_group.team.display_name}\n permissionLevel: CAN_EDIT\n```\n{{% /example %}}\n{{% /examples %}}\n## Troubleshooting\n\nIf you see `Error: cannot create sql query: Internal Server Error` during `pulumi up`, double-check that you are using the correct `data_source_id`.\n\nOperations on `databricks.SqlQuery` schedules are ⛔️ deprecated. You can create, update or delete a schedule for SQLA and other Databricks resources using the databricks.Job resource.\n\n## Related Resources\n\nThe following resources are often used in the same context:\n\n* End to end workspace management guide.\n* databricks.SqlDashboard to manage Databricks SQL [Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html).\n* databricks.SqlEndpoint to manage Databricks SQL [Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html).\n* databricks.SqlGlobalConfig to configure the security policy, databricks_instance_profile, and [data access properties](https://docs.databricks.com/sql/admin/data-access-configuration.html) for all databricks.SqlEndpoint of the workspace.\n* databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html).\n* databricks.Job to schedule Databricks SQL queries (as well as dashboards and alerts) using Databricks Jobs.\n\n\n## Import\n\nYou can import a `databricks_sql_query` resource with ID like the following:\n\n```sh\n $ pulumi import databricks:index/sqlQuery:SqlQuery this \u003cquery-id\u003e\n```\n\n ", "properties": { + "createdAt": { + "type": "string" + }, "dataSourceId": { "type": "string" }, @@ -16563,14 +16851,22 @@ "items": { "type": "string" } + }, + "updatedAt": { + "type": "string" } }, "required": [ + "createdAt", "dataSourceId", "name", - "query" + "query", + "updatedAt" ], "inputProperties": { + "createdAt": { + "type": "string" + }, "dataSourceId": { "type": "string" }, @@ -16605,6 +16901,9 @@ "items": { "type": "string" } + }, + "updatedAt": { + "type": "string" } }, "requiredInputs": [ @@ -16614,6 +16913,9 @@ "stateInputs": { "description": "Input properties used for looking up and filtering SqlQuery resources.\n", "properties": { + "createdAt": { + "type": "string" + }, "dataSourceId": { "type": "string" }, @@ -16648,6 +16950,9 @@ "items": { "type": "string" } + }, + "updatedAt": { + "type": "string" } }, "type": "object" @@ -17065,6 +17370,9 @@ "databricksGcpServiceAccount": { "$ref": "#/types/databricks:index/StorageCredentialDatabricksGcpServiceAccount:StorageCredentialDatabricksGcpServiceAccount" }, + "forceDestroy": { + "type": "boolean" + }, "gcpServiceAccountKey": { "$ref": "#/types/databricks:index/StorageCredentialGcpServiceAccountKey:StorageCredentialGcpServiceAccountKey" }, @@ -17106,6 +17414,9 @@ "databricksGcpServiceAccount": { "$ref": 
"#/types/databricks:index/StorageCredentialDatabricksGcpServiceAccount:StorageCredentialDatabricksGcpServiceAccount" }, + "forceDestroy": { + "type": "boolean" + }, "gcpServiceAccountKey": { "$ref": "#/types/databricks:index/StorageCredentialGcpServiceAccountKey:StorageCredentialGcpServiceAccountKey" }, @@ -17144,6 +17455,9 @@ "databricksGcpServiceAccount": { "$ref": "#/types/databricks:index/StorageCredentialDatabricksGcpServiceAccount:StorageCredentialDatabricksGcpServiceAccount" }, + "forceDestroy": { + "type": "boolean" + }, "gcpServiceAccountKey": { "$ref": "#/types/databricks:index/StorageCredentialGcpServiceAccountKey:StorageCredentialGcpServiceAccountKey" }, @@ -18305,10 +18619,13 @@ } }, "databricks:index/getCurrentUser:getCurrentUser": { - "description": "## Exported attributes\n\nData source exposes the following attributes:\n\n* `id` - The id of the calling user.\n* `external_id` - ID of the user in an external identity provider.\n* `user_name` - Name of the user, e.g. `mr.foo@example.com`. If the currently logged-in identity is a service principal, returns the application ID, e.g. `11111111-2222-3333-4444-555666777888`\n* `home` - Home folder of the user, e.g. `/Users/mr.foo@example.com`.\n* `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`.\n* `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`.\n* `workspace_url` - URL of the current Databricks workspace.\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* End to end workspace management guide\n* databricks.Directory to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html).\n* databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html).\n* databricks.Repo to manage [Databricks Repos](https://docs.databricks.com/repos.html).\n", + "description": "## Exported attributes\n\nData source exposes the following attributes:\n\n* `id` - The id of the calling user.\n* `external_id` - ID of the user in an external identity provider.\n* `user_name` - Name of the user, e.g. `mr.foo@example.com`. If the currently logged-in identity is a service principal, returns the application ID, e.g. `11111111-2222-3333-4444-555666777888`\n* `home` - Home folder of the user, e.g. `/Users/mr.foo@example.com`.\n* `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`.\n* `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`.\n* `workspace_url` - URL of the current Databricks workspace.\n* `acl_principal_id` - identifier for use in databricks_access_control_rule_set, e.g. 
`users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal.\n\n## Related Resources\n\nThe following resources are used in the same context:\n\n* End to end workspace management guide\n* databricks.Directory to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html).\n* databricks.Notebook to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html).\n* databricks.Repo to manage [Databricks Repos](https://docs.databricks.com/repos.html).\n", "outputs": { "description": "A collection of values returned by getCurrentUser.\n", "properties": { + "aclPrincipalId": { + "type": "string" + }, "alphanumeric": { "type": "string" }, @@ -18334,6 +18651,7 @@ }, "type": "object", "required": [ + "aclPrincipalId", "alphanumeric", "externalId", "home", @@ -18498,6 +18816,10 @@ "inputs": { "description": "A collection of arguments for invoking getGroup.\n", "properties": { + "aclPrincipalId": { + "type": "string", + "description": "identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`.\n" + }, "allowClusterCreate": { "type": "boolean", "description": "True if group members can create clusters\n" @@ -18575,6 +18897,10 @@ "outputs": { "description": "A collection of values returned by getGroup.\n", "properties": { + "aclPrincipalId": { + "type": "string", + "description": "identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`.\n" + }, "allowClusterCreate": { "type": "boolean", "description": "True if group members can create clusters\n" @@ -18648,6 +18974,7 @@ }, "type": "object", "required": [ + "aclPrincipalId", "childGroups", "displayName", "externalId", @@ -19292,6 +19619,10 @@ "inputs": { "description": "A collection of arguments for invoking getServicePrincipal.\n", "properties": { + "aclPrincipalId": { + "type": "string", + "description": "identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`.\n" + }, "active": { "type": "boolean", "description": "Whether service principal is active or not.\n" @@ -19329,6 +19660,10 @@ "outputs": { "description": "A collection of values returned by getServicePrincipal.\n", "properties": { + "aclPrincipalId": { + "type": "string", + "description": "identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`.\n" + }, "active": { "type": "boolean", "description": "Whether service principal is active or not.\n" @@ -19362,6 +19697,7 @@ }, "type": "object", "required": [ + "aclPrincipalId", "active", "applicationId", "displayName", @@ -19895,6 +20231,10 @@ "outputs": { "description": "A collection of values returned by getUser.\n", "properties": { + "aclPrincipalId": { + "type": "string", + "description": "identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com`.\n" + }, "alphanumeric": { "type": "string", "description": "Alphanumeric representation of user local name. e.g. 
`mr_foo`.\n" @@ -19932,6 +20272,7 @@ }, "type": "object", "required": [ + "aclPrincipalId", "alphanumeric", "applicationId", "displayName", diff --git a/provider/go.mod b/provider/go.mod index bc698705..926abc31 100644 --- a/provider/go.mod +++ b/provider/go.mod @@ -5,17 +5,17 @@ go 1.20 replace github.com/hashicorp/terraform-plugin-sdk/v2 => github.com/pulumi/terraform-plugin-sdk/v2 v2.0.0-20230710100801-03a71d0fca3d require ( - github.com/databricks/databricks-sdk-go v0.15.0 - github.com/databricks/terraform-provider-databricks v1.23.0 - github.com/pulumi/pulumi-terraform-bridge/v3 v3.56.2 + github.com/databricks/databricks-sdk-go v0.17.0 + github.com/databricks/terraform-provider-databricks v1.24.0 + github.com/pulumi/pulumi-terraform-bridge/v3 v3.57.0 ) require ( - cloud.google.com/go v0.110.4 // indirect - cloud.google.com/go/compute v1.22.0 // indirect + cloud.google.com/go v0.110.6 // indirect + cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.0 // indirect - cloud.google.com/go/kms v1.12.1 // indirect + cloud.google.com/go/iam v1.1.1 // indirect + cloud.google.com/go/kms v1.15.0 // indirect cloud.google.com/go/logging v1.7.0 // indirect cloud.google.com/go/longrunning v0.5.1 // indirect cloud.google.com/go/storage v1.30.1 // indirect @@ -90,7 +90,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/s2a-go v0.1.4 // indirect + github.com/google/s2a-go v0.1.5 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.3.0 // indirect github.com/google/wire v0.5.0 // indirect @@ -126,8 +126,8 @@ require ( github.com/hashicorp/terraform-json v0.17.1 // indirect github.com/hashicorp/terraform-plugin-go v0.18.0 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect - github.com/hashicorp/terraform-plugin-sdk/v2 v2.27.0 // indirect - github.com/hashicorp/terraform-registry-address v0.2.1 // indirect + github.com/hashicorp/terraform-plugin-sdk/v2 v2.28.0 // indirect + github.com/hashicorp/terraform-registry-address v0.2.2 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/vault/api v1.8.2 // indirect github.com/hashicorp/vault/sdk v0.6.1 // indirect @@ -207,28 +207,28 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/zclconf/go-cty v1.13.2 // indirect + github.com/zclconf/go-cty v1.13.3 // indirect go.opencensus.io v0.24.0 // indirect go.uber.org/atomic v1.10.0 // indirect gocloud.dev v0.29.0 // indirect gocloud.dev/secrets/hashivault v0.27.0 // indirect - golang.org/x/crypto v0.11.0 // indirect - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.12.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/term v0.11.0 // indirect + 
golang.org/x/text v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.134.0 // indirect + google.golang.org/api v0.138.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 // indirect - google.golang.org/grpc v1.56.2 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.57.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect diff --git a/provider/go.sum b/provider/go.sum index 7f95a911..aa0a63e8 100644 --- a/provider/go.sum +++ b/provider/go.sum @@ -41,8 +41,8 @@ cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFO cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE= cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= -cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= @@ -179,8 +179,8 @@ cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63 cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= -cloud.google.com/go/compute v1.22.0 h1:cB8R6FtUtT1TYGl5R3xuxnW6OUIc/DrT2aiR16TTG7Y= -cloud.google.com/go/compute v1.22.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -323,8 +323,8 @@ cloud.google.com/go/iam v0.10.0/go.mod h1:nXAECrMt2qHpF6RZUZseteD6QyanL68reN4OXP cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iam v1.1.0 
h1:67gSqaPukx7O8WLLHMa0PNs3EBGd2eE4d+psbO/CO94= -cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -344,8 +344,8 @@ cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4 cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/kms v1.12.1 h1:xZmZuwy2cwzsocmKDOPu4BL7umg8QXagQx6fKVmf45U= -cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/kms v1.15.0 h1:xYl5WEaSekKYN5gGRyhjvZKM22GVBBCzegGNVPy+aIs= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= @@ -1137,10 +1137,10 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/databricks/databricks-sdk-go v0.15.0 h1:xnJvrEklzSqclFqc+YN4tknfT34dCN6bRdusQDqZOqw= -github.com/databricks/databricks-sdk-go v0.15.0/go.mod h1:Xupq4Bo+/NHWvrMOKQkCvafZYgXgKGANcVLCzMS/OSE= -github.com/databricks/terraform-provider-databricks v1.23.0 h1:8IaphOmfdxVVxHAuSxYP95PSMm/tyB37Oj/W/hSTykM= -github.com/databricks/terraform-provider-databricks v1.23.0/go.mod h1:XhIE+XC41AHPQV8SF+lc6vGPfvar19sWn8pY4+l+nzM= +github.com/databricks/databricks-sdk-go v0.17.0 h1:l1WNdMIQ5Olu9Idow/2RhMFYE0G7aCMaNb2sYjBzmlc= +github.com/databricks/databricks-sdk-go v0.17.0/go.mod h1:T7ECrCYkBHPVjzVJvjPwV+j8wC1NMalvj8r2Lfm/zVc= +github.com/databricks/terraform-provider-databricks v1.24.0 h1:8fsJhUpPUDAAmTspg7C7FGtVr9tGBkbSe0l5SKNglJ0= +github.com/databricks/terraform-provider-databricks v1.24.0/go.mod h1:d/Ud66MkP+QG4Z4HLs5C+ch1yfi3f3fd/JUvxN79Zkc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -1538,8 +1538,8 @@ github.com/google/pprof v0.0.0-20220318212150-b2ab0324ddda/go.mod h1:KgnwoLYCZ8I github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk= github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.4 
h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.5 h1:8IYp3w9nysqv3JH+NJgXJzGbDHzLOTj43BmSkp+O7qg= +github.com/google/s2a-go v0.1.5/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= @@ -1751,8 +1751,9 @@ github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwU github.com/hashicorp/terraform-plugin-sdk v1.7.0 h1:B//oq0ZORG+EkVrIJy0uPGSonvmXqxSzXe8+GhknoW0= github.com/hashicorp/terraform-plugin-sdk v1.7.0/go.mod h1:OjgQmey5VxnPej/buEhe+YqKm0KNvV3QqU4hkqHqPCY= github.com/hashicorp/terraform-plugin-test v1.2.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs= -github.com/hashicorp/terraform-registry-address v0.2.1 h1:QuTf6oJ1+WSflJw6WYOHhLgwUiQ0FrROpHPYFtwTYWM= github.com/hashicorp/terraform-registry-address v0.2.1/go.mod h1:BSE9fIFzp0qWsJUUyGquo4ldV9k2n+psif6NYkBRS3Y= +github.com/hashicorp/terraform-registry-address v0.2.2 h1:lPQBg403El8PPicg/qONZJDC6YlgCVbWDtNmmZKtBno= +github.com/hashicorp/terraform-registry-address v0.2.2/go.mod h1:LtwNbCihUoUZ3RYriyS2wF/lGPB6gF9ICLRtuDk7hSo= github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= @@ -2310,8 +2311,8 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/pulumi/pulumi-java/pkg v0.9.4 h1:gIQZmlUI1o9ye8CL2XFqtmAX6Lwr9uj/+HzjboiSmK4= github.com/pulumi/pulumi-java/pkg v0.9.4/go.mod h1:c6rSw/+q4O0IImgJ9axxoC6QesbPYWBaG5gimbHouUQ= github.com/pulumi/pulumi-terraform-bridge/testing v0.0.1 h1:SCg1gjfY9N4yn8U8peIUYATifjoDABkyR7H9lmefsfc= -github.com/pulumi/pulumi-terraform-bridge/v3 v3.56.2 h1:NY9kPxzquV8rW/YYYlu0o7LLF/NmfUGEY/uZ06h/CMw= -github.com/pulumi/pulumi-terraform-bridge/v3 v3.56.2/go.mod h1:ykaml8e6XS/yI9JOcNZ+6gLirs6EWTB0FmjbT+JyEdU= +github.com/pulumi/pulumi-terraform-bridge/v3 v3.57.0 h1:munOxi56glme47MT8/wI29o9wrRBJrEQuwjAgm1zviI= +github.com/pulumi/pulumi-terraform-bridge/v3 v3.57.0/go.mod h1:ykaml8e6XS/yI9JOcNZ+6gLirs6EWTB0FmjbT+JyEdU= github.com/pulumi/pulumi-terraform-bridge/x/muxer v0.0.4 h1:rIzMmtcVpPX8ynaz6/nW5AHNY63DiNfCohqmxWvMpM4= github.com/pulumi/pulumi-terraform-bridge/x/muxer v0.0.4/go.mod h1:Kt8RIZWa/N8rW3+0g6NrqCBmF3o+HuIhFaZpssEkG6w= github.com/pulumi/pulumi-yaml v1.1.1 h1:8pyBNIU8+ym0wYpjhsCqN+cutygfK1XbhY2YEeNfyXY= @@ -2554,8 +2555,9 @@ github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty v1.13.0/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= github.com/zclconf/go-cty v1.13.1/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= -github.com/zclconf/go-cty v1.13.2 h1:4GvrUxe/QUDYuJKAav4EYqdM47/kZa672LwmXFmEKT0= github.com/zclconf/go-cty v1.13.2/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= +github.com/zclconf/go-cty v1.13.3 h1:m+b9q3YDbg6Bec5rr+KGy1MzEVzY/jC2X+YX4yqKtHI= +github.com/zclconf/go-cty 
v1.13.3/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8= @@ -2748,8 +2750,8 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58 golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2767,8 +2769,8 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2919,8 +2921,8 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2956,8 +2958,8 @@ 
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -3146,8 +3148,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -3163,8 +3165,8 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -3182,8 +3184,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod 
h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -3294,7 +3296,7 @@ golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -3380,8 +3382,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw= -google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk= +google.golang.org/api v0.138.0 h1:K/tVp05MxNVbHShRw9m7e9VJGdagNeTdMzqPH7AUqr0= +google.golang.org/api v0.138.0/go.mod h1:4xyob8CxC+0GChNBvEUAk8VBKNvYOTWM9T3v3UfRxuY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3545,12 +3547,12 @@ google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 h1:Z8qdAF9GFsmcUuWQ5KVYIpP3PCKydn/YKORnghIalu4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto 
v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -3604,8 +3606,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.0/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/sdk/dotnet/AccessControlRuleSet.cs b/sdk/dotnet/AccessControlRuleSet.cs index 2492883c..c5ac5776 100644 --- a/sdk/dotnet/AccessControlRuleSet.cs +++ b/sdk/dotnet/AccessControlRuleSet.cs @@ -12,10 +12,259 @@ namespace Pulumi.Databricks /// /// This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. /// - /// > **Note** Currently, we only support managing access rules on service principal resources through `databricks.AccessControlRuleSet`. + /// > **Note** Currently, we only support managing access rules on service principal, group and account resources through `databricks.AccessControlRuleSet`. /// /// > **Warning** `databricks.AccessControlRuleSet` cannot be used to manage access rules for resources supported by databricks_permissions. Refer to its documentation for more information. 
/// + /// ## Service principal rule set usage + /// + /// Through a Databricks workspace: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var accountId = "00000000-0000-0000-0000-000000000000"; + /// + /// var ds = Databricks.GetGroup.Invoke(new() + /// { + /// DisplayName = "Data Science", + /// }); + /// + /// var automationSp = new Databricks.ServicePrincipal("automationSp", new() + /// { + /// DisplayName = "SP_FOR_AUTOMATION", + /// }); + /// + /// var automationSpRuleSet = new Databricks.AccessControlRuleSet("automationSpRuleSet", new() + /// { + /// GrantRules = new[] + /// { + /// new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs + /// { + /// Principals = new[] + /// { + /// ds.Apply(getGroupResult => getGroupResult.AclPrincipalId), + /// }, + /// Role = "roles/servicePrincipal.user", + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// + /// Through AWS Databricks account: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var accountId = "00000000-0000-0000-0000-000000000000"; + /// + /// // account level group creation + /// var ds = new Databricks.Group("ds"); + /// + /// var automationSp = new Databricks.ServicePrincipal("automationSp", new() + /// { + /// DisplayName = "SP_FOR_AUTOMATION", + /// }); + /// + /// var automationSpRuleSet = new Databricks.AccessControlRuleSet("automationSpRuleSet", new() + /// { + /// GrantRules = new[] + /// { + /// new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs + /// { + /// Principals = new[] + /// { + /// ds.AclPrincipalId, + /// }, + /// Role = "roles/servicePrincipal.user", + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// + /// Through Azure Databricks account: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var accountId = "00000000-0000-0000-0000-000000000000"; + /// + /// // account level group creation + /// var ds = new Databricks.Group("ds"); + /// + /// var automationSp = new Databricks.ServicePrincipal("automationSp", new() + /// { + /// ApplicationId = "00000000-0000-0000-0000-000000000000", + /// DisplayName = "SP_FOR_AUTOMATION", + /// }); + /// + /// var automationSpRuleSet = new Databricks.AccessControlRuleSet("automationSpRuleSet", new() + /// { + /// GrantRules = new[] + /// { + /// new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs + /// { + /// Principals = new[] + /// { + /// ds.AclPrincipalId, + /// }, + /// Role = "roles/servicePrincipal.user", + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// + /// Through GCP Databricks account: + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var accountId = "00000000-0000-0000-0000-000000000000"; + /// + /// // account level group creation + /// var ds = new Databricks.Group("ds"); + /// + /// var automationSp = new Databricks.ServicePrincipal("automationSp", new() + /// { + /// DisplayName = "SP_FOR_AUTOMATION", + /// }); + /// + /// var automationSpRuleSet = new 
Databricks.AccessControlRuleSet("automationSpRuleSet", new() + /// { + /// GrantRules = new[] + /// { + /// new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs + /// { + /// Principals = new[] + /// { + /// ds.AclPrincipalId, + /// }, + /// Role = "roles/servicePrincipal.user", + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// + /// ## Group rule set usage + /// + /// Refer to the appropriate provider configuration as shown in the examples for service principal rule set. + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var accountId = "00000000-0000-0000-0000-000000000000"; + /// + /// var ds = Databricks.GetGroup.Invoke(new() + /// { + /// DisplayName = "Data Science", + /// }); + /// + /// var john = Databricks.GetUser.Invoke(new() + /// { + /// UserName = "john.doe@example.com", + /// }); + /// + /// var dsGroupRuleSet = new Databricks.AccessControlRuleSet("dsGroupRuleSet", new() + /// { + /// GrantRules = new[] + /// { + /// new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs + /// { + /// Principals = new[] + /// { + /// john.Apply(getUserResult => getUserResult.AclPrincipalId), + /// }, + /// Role = "roles/group.manager", + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// + /// ## Account rule set usage + /// + /// Refer to the appropriate provider configuration as shown in the examples for service principal rule set. + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var accountId = "00000000-0000-0000-0000-000000000000"; + /// + /// var ds = Databricks.GetGroup.Invoke(new() + /// { + /// DisplayName = "Data Science", + /// }); + /// + /// var john = Databricks.GetUser.Invoke(new() + /// { + /// UserName = "john.doe@example.com", + /// }); + /// + /// var accountRuleSet = new Databricks.AccessControlRuleSet("accountRuleSet", new() + /// { + /// GrantRules = new[] + /// { + /// new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs + /// { + /// Principals = new[] + /// { + /// john.Apply(getUserResult => getUserResult.AclPrincipalId), + /// }, + /// Role = "roles/group.manager", + /// }, + /// new Databricks.Inputs.AccessControlRuleSetGrantRuleArgs + /// { + /// Principals = new[] + /// { + /// data.Databricks_user.Ds.Acl_principal_id, + /// }, + /// Role = "roles/servicePrincipal.manager", + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// /// ## Related Resources /// /// The following resources are often used in the same context: @@ -41,6 +290,8 @@ public partial class AccessControlRuleSet : global::Pulumi.CustomResource /// /// Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: /// * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + /// * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + /// * `accounts/{account_id}/ruleSets/default` /// [Output("name")] public Output Name { get; private set; } = null!; @@ -108,6 +359,8 @@ public InputList GrantRules /// /// Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. 
The following rule set formats are supported: /// * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + /// * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + /// * `accounts/{account_id}/ruleSets/default` /// [Input("name")] public Input? Name { get; set; } @@ -140,6 +393,8 @@ public InputList GrantRules /// /// Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: /// * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + /// * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + /// * `accounts/{account_id}/ruleSets/default` /// [Input("name")] public Input? Name { get; set; } diff --git a/sdk/dotnet/Catalog.cs b/sdk/dotnet/Catalog.cs index dcb10e3a..108be19e 100644 --- a/sdk/dotnet/Catalog.cs +++ b/sdk/dotnet/Catalog.cs @@ -57,6 +57,12 @@ public partial class Catalog : global::Pulumi.CustomResource [Output("comment")] public Output Comment { get; private set; } = null!; + /// + /// For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + /// + [Output("connectionName")] + public Output ConnectionName { get; private set; } = null!; + /// /// Delete catalog regardless of its contents. /// @@ -160,6 +166,12 @@ public sealed class CatalogArgs : global::Pulumi.ResourceArgs [Input("comment")] public Input? Comment { get; set; } + /// + /// For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + /// + [Input("connectionName")] + public Input? ConnectionName { get; set; } + /// /// Delete catalog regardless of its contents. /// @@ -231,6 +243,12 @@ public sealed class CatalogState : global::Pulumi.ResourceArgs [Input("comment")] public Input? Comment { get; set; } + /// + /// For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + /// + [Input("connectionName")] + public Input? ConnectionName { get; set; } + /// /// Delete catalog regardless of its contents. /// diff --git a/sdk/dotnet/Connection.cs b/sdk/dotnet/Connection.cs new file mode 100644 index 00000000..7e021c20 --- /dev/null +++ b/sdk/dotnet/Connection.cs @@ -0,0 +1,282 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks +{ + /// + /// Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: + /// + /// - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. 
+ /// - A foreign catalog + /// + /// This resource manages connections in Unity Catalog + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var mysql = new Databricks.Connection("mysql", new() + /// { + /// Comment = "this is a connection to mysql db", + /// ConnectionType = "MYSQL", + /// Options = + /// { + /// { "host", "test.mysql.database.azure.com" }, + /// { "password", "password" }, + /// { "port", "3306" }, + /// { "user", "user" }, + /// }, + /// Properties = + /// { + /// { "purpose", "testing" }, + /// }, + /// }); + /// + /// }); + /// ``` + /// + /// ## Import + /// + /// This resource can be imported by `name` bash + /// + /// ```sh + /// $ pulumi import databricks:index/connection:Connection this <connection_name> + /// ``` + /// + [DatabricksResourceType("databricks:index/connection:Connection")] + public partial class Connection : global::Pulumi.CustomResource + { + /// + /// Free-form text. + /// + [Output("comment")] + public Output Comment { get; private set; } = null!; + + /// + /// Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + /// + [Output("connectionType")] + public Output ConnectionType { get; private set; } = null!; + + [Output("metastoreId")] + public Output MetastoreId { get; private set; } = null!; + + /// + /// Name of the Connection. + /// + [Output("name")] + public Output Name { get; private set; } = null!; + + /// + /// The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + /// + [Output("options")] + public Output> Options { get; private set; } = null!; + + /// + /// Name of the connection owner. + /// + [Output("owner")] + public Output Owner { get; private set; } = null!; + + /// + /// Free-form connection properties. + /// + [Output("properties")] + public Output?> Properties { get; private set; } = null!; + + [Output("readOnly")] + public Output ReadOnly { get; private set; } = null!; + + + /// + /// Create a Connection resource with the given unique name, arguments, and options. + /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public Connection(string name, ConnectionArgs args, CustomResourceOptions? options = null) + : base("databricks:index/connection:Connection", name, args ?? new ConnectionArgs(), MakeResourceOptions(options, "")) + { + } + + private Connection(string name, Input id, ConnectionState? state = null, CustomResourceOptions? options = null) + : base("databricks:index/connection:Connection", name, state, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + AdditionalSecretOutputs = + { + "options", + }, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? 
merged.Id; + return merged; + } + /// + /// Get an existing Connection resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// Any extra arguments used during the lookup. + /// A bag of options that control this resource's behavior + public static Connection Get(string name, Input id, ConnectionState? state = null, CustomResourceOptions? options = null) + { + return new Connection(name, id, state, options); + } + } + + public sealed class ConnectionArgs : global::Pulumi.ResourceArgs + { + /// + /// Free-form text. + /// + [Input("comment")] + public Input? Comment { get; set; } + + /// + /// Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + /// + [Input("connectionType", required: true)] + public Input ConnectionType { get; set; } = null!; + + [Input("metastoreId")] + public Input? MetastoreId { get; set; } + + /// + /// Name of the Connection. + /// + [Input("name")] + public Input? Name { get; set; } + + [Input("options", required: true)] + private InputMap? _options; + + /// + /// The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + /// + public InputMap Options + { + get => _options ?? (_options = new InputMap()); + set + { + var emptySecret = Output.CreateSecret(ImmutableDictionary.Create()); + _options = Output.All(value, emptySecret).Apply(v => v[0]); + } + } + + /// + /// Name of the connection owner. + /// + [Input("owner")] + public Input? Owner { get; set; } + + [Input("properties")] + private InputMap? _properties; + + /// + /// Free-form connection properties. + /// + public InputMap Properties + { + get => _properties ?? (_properties = new InputMap()); + set => _properties = value; + } + + [Input("readOnly")] + public Input? ReadOnly { get; set; } + + public ConnectionArgs() + { + } + public static new ConnectionArgs Empty => new ConnectionArgs(); + } + + public sealed class ConnectionState : global::Pulumi.ResourceArgs + { + /// + /// Free-form text. + /// + [Input("comment")] + public Input? Comment { get; set; } + + /// + /// Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + /// + [Input("connectionType")] + public Input? ConnectionType { get; set; } + + [Input("metastoreId")] + public Input? MetastoreId { get; set; } + + /// + /// Name of the Connection. + /// + [Input("name")] + public Input? Name { get; set; } + + [Input("options")] + private InputMap? _options; + + /// + /// The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + /// + public InputMap Options + { + get => _options ?? (_options = new InputMap()); + set + { + var emptySecret = Output.CreateSecret(ImmutableDictionary.Create()); + _options = Output.All(value, emptySecret).Apply(v => v[0]); + } + } + + /// + /// Name of the connection owner. + /// + [Input("owner")] + public Input? Owner { get; set; } + + [Input("properties")] + private InputMap? _properties; + + /// + /// Free-form connection properties. + /// + public InputMap Properties + { + get => _properties ?? 
(_properties = new InputMap()); + set => _properties = value; + } + + [Input("readOnly")] + public Input? ReadOnly { get; set; } + + public ConnectionState() + { + } + public static new ConnectionState Empty => new ConnectionState(); + } +} diff --git a/sdk/dotnet/ExternalLocation.cs b/sdk/dotnet/ExternalLocation.cs index 2e66b0e6..8bbdd623 100644 --- a/sdk/dotnet/ExternalLocation.cs +++ b/sdk/dotnet/ExternalLocation.cs @@ -26,6 +26,12 @@ namespace Pulumi.Databricks [DatabricksResourceType("databricks:index/externalLocation:ExternalLocation")] public partial class ExternalLocation : global::Pulumi.CustomResource { + /// + /// The ARN of the s3 access point to use with the external location (AWS). + /// + [Output("accessPoint")] + public Output AccessPoint { get; private set; } = null!; + /// /// User-supplied free-form text. /// @@ -33,17 +39,29 @@ public partial class ExternalLocation : global::Pulumi.CustomResource public Output Comment { get; private set; } = null!; /// - /// Name of the databricks.StorageCredential to use with this External Location. + /// Name of the databricks.StorageCredential to use with this external location. /// [Output("credentialName")] public Output CredentialName { get; private set; } = null!; + /// + /// The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + /// + [Output("encryptionDetails")] + public Output EncryptionDetails { get; private set; } = null!; + /// /// Destroy external location regardless of its dependents. /// [Output("forceDestroy")] public Output ForceDestroy { get; private set; } = null!; + /// + /// Update external location regardless of its dependents. + /// + [Output("forceUpdate")] + public Output ForceUpdate { get; private set; } = null!; + [Output("metastoreId")] public Output MetastoreId { get; private set; } = null!; @@ -54,7 +72,7 @@ public partial class ExternalLocation : global::Pulumi.CustomResource public Output Name { get; private set; } = null!; /// - /// Username/groupname/sp application_id of the external Location owner. + /// Username/groupname/sp application_id of the external location owner. /// [Output("owner")] public Output Owner { get; private set; } = null!; @@ -123,6 +141,12 @@ public static ExternalLocation Get(string name, Input id, ExternalLocati public sealed class ExternalLocationArgs : global::Pulumi.ResourceArgs { + /// + /// The ARN of the s3 access point to use with the external location (AWS). + /// + [Input("accessPoint")] + public Input? AccessPoint { get; set; } + /// /// User-supplied free-form text. /// @@ -130,17 +154,29 @@ public sealed class ExternalLocationArgs : global::Pulumi.ResourceArgs public Input? Comment { get; set; } /// - /// Name of the databricks.StorageCredential to use with this External Location. + /// Name of the databricks.StorageCredential to use with this external location. /// [Input("credentialName", required: true)] public Input CredentialName { get; set; } = null!; + /// + /// The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + /// + [Input("encryptionDetails")] + public Input? EncryptionDetails { get; set; } + /// /// Destroy external location regardless of its dependents. /// [Input("forceDestroy")] public Input? ForceDestroy { get; set; } + /// + /// Update external location regardless of its dependents. + /// + [Input("forceUpdate")] + public Input? ForceUpdate { get; set; } + [Input("metastoreId")] public Input? 
MetastoreId { get; set; } @@ -151,7 +187,7 @@ public sealed class ExternalLocationArgs : global::Pulumi.ResourceArgs public Input? Name { get; set; } /// - /// Username/groupname/sp application_id of the external Location owner. + /// Username/groupname/sp application_id of the external location owner. /// [Input("owner")] public Input? Owner { get; set; } @@ -182,6 +218,12 @@ public ExternalLocationArgs() public sealed class ExternalLocationState : global::Pulumi.ResourceArgs { + /// + /// The ARN of the s3 access point to use with the external location (AWS). + /// + [Input("accessPoint")] + public Input? AccessPoint { get; set; } + /// /// User-supplied free-form text. /// @@ -189,17 +231,29 @@ public sealed class ExternalLocationState : global::Pulumi.ResourceArgs public Input? Comment { get; set; } /// - /// Name of the databricks.StorageCredential to use with this External Location. + /// Name of the databricks.StorageCredential to use with this external location. /// [Input("credentialName")] public Input? CredentialName { get; set; } + /// + /// The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + /// + [Input("encryptionDetails")] + public Input? EncryptionDetails { get; set; } + /// /// Destroy external location regardless of its dependents. /// [Input("forceDestroy")] public Input? ForceDestroy { get; set; } + /// + /// Update external location regardless of its dependents. + /// + [Input("forceUpdate")] + public Input? ForceUpdate { get; set; } + [Input("metastoreId")] public Input? MetastoreId { get; set; } @@ -210,7 +264,7 @@ public sealed class ExternalLocationState : global::Pulumi.ResourceArgs public Input? Name { get; set; } /// - /// Username/groupname/sp application_id of the external Location owner. + /// Username/groupname/sp application_id of the external location owner. /// [Input("owner")] public Input? Owner { get; set; } diff --git a/sdk/dotnet/GetCurrentUser.cs b/sdk/dotnet/GetCurrentUser.cs index 3fc698d2..6ae01e43 100644 --- a/sdk/dotnet/GetCurrentUser.cs +++ b/sdk/dotnet/GetCurrentUser.cs @@ -23,6 +23,7 @@ public static class GetCurrentUser /// * `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`. /// * `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`. /// * `workspace_url` - URL of the current Databricks workspace. + /// * `acl_principal_id` - identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal. /// /// ## Related Resources /// @@ -41,6 +42,7 @@ public static Task InvokeAsync(InvokeOptions? 
options = nu [OutputType] public sealed class GetCurrentUserResult { + public readonly string AclPrincipalId; public readonly string Alphanumeric; public readonly string ExternalId; public readonly string Home; @@ -54,6 +56,8 @@ public sealed class GetCurrentUserResult [OutputConstructor] private GetCurrentUserResult( + string aclPrincipalId, + string alphanumeric, string externalId, @@ -68,6 +72,7 @@ private GetCurrentUserResult( string workspaceUrl) { + AclPrincipalId = aclPrincipalId; Alphanumeric = alphanumeric; ExternalId = externalId; Home = home; diff --git a/sdk/dotnet/GetGroup.cs b/sdk/dotnet/GetGroup.cs index 7a764583..be23c5c9 100644 --- a/sdk/dotnet/GetGroup.cs +++ b/sdk/dotnet/GetGroup.cs @@ -121,6 +121,12 @@ public static Output Invoke(GetGroupInvokeArgs args, InvokeOptio public sealed class GetGroupArgs : global::Pulumi.InvokeArgs { + /// + /// identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + /// + [Input("aclPrincipalId")] + public string? AclPrincipalId { get; set; } + /// /// True if group members can create clusters /// @@ -234,6 +240,12 @@ public GetGroupArgs() public sealed class GetGroupInvokeArgs : global::Pulumi.InvokeArgs { + /// + /// identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + /// + [Input("aclPrincipalId")] + public Input? AclPrincipalId { get; set; } + /// /// True if group members can create clusters /// @@ -349,6 +361,10 @@ public GetGroupInvokeArgs() [OutputType] public sealed class GetGroupResult { + /// + /// identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + /// + public readonly string AclPrincipalId; /// /// True if group members can create clusters /// @@ -393,6 +409,8 @@ public sealed class GetGroupResult [OutputConstructor] private GetGroupResult( + string aclPrincipalId, + bool? allowClusterCreate, bool? allowInstancePoolCreate, @@ -421,6 +439,7 @@ private GetGroupResult( bool? workspaceAccess) { + AclPrincipalId = aclPrincipalId; AllowClusterCreate = allowClusterCreate; AllowInstancePoolCreate = allowInstancePoolCreate; ChildGroups = childGroups; diff --git a/sdk/dotnet/GetServicePrincipal.cs b/sdk/dotnet/GetServicePrincipal.cs index e1e0eeb3..bd58206d 100644 --- a/sdk/dotnet/GetServicePrincipal.cs +++ b/sdk/dotnet/GetServicePrincipal.cs @@ -125,6 +125,12 @@ public static Output Invoke(GetServicePrincipalInvoke public sealed class GetServicePrincipalArgs : global::Pulumi.InvokeArgs { + /// + /// identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + /// + [Input("aclPrincipalId")] + public string? AclPrincipalId { get; set; } + /// /// Whether service principal is active or not. /// @@ -178,6 +184,12 @@ public GetServicePrincipalArgs() public sealed class GetServicePrincipalInvokeArgs : global::Pulumi.InvokeArgs { + /// + /// identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + /// + [Input("aclPrincipalId")] + public Input? AclPrincipalId { get; set; } + /// /// Whether service principal is active or not. /// @@ -233,6 +245,10 @@ public GetServicePrincipalInvokeArgs() [OutputType] public sealed class GetServicePrincipalResult { + /// + /// identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + /// + public readonly string AclPrincipalId; /// /// Whether service principal is active or not. 
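Each of the data sources touched here (current user, group, service principal, user) now exposes `aclPrincipalId` in the exact form expected by `databricks.AccessControlRuleSet` grant rules. A minimal sketch of surfacing it for the calling identity, useful when granting the deploying principal a role on the rule set it manages:

```csharp
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    // Resolves to `users/<user name>` for a user identity, or
    // `servicePrincipals/<application id>` when run as a service principal.
    var me = Databricks.GetCurrentUser.Invoke();

    return new Dictionary<string, object?>
    {
        ["aclPrincipalId"] = me.Apply(u => u.AclPrincipalId),
    };
});
```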
/// @@ -262,6 +278,8 @@ public sealed class GetServicePrincipalResult [OutputConstructor] private GetServicePrincipalResult( + string aclPrincipalId, + bool active, string applicationId, @@ -278,6 +296,7 @@ private GetServicePrincipalResult( string spId) { + AclPrincipalId = aclPrincipalId; Active = active; ApplicationId = applicationId; DisplayName = displayName; diff --git a/sdk/dotnet/GetUser.cs b/sdk/dotnet/GetUser.cs index 2d2947d9..c9627935 100644 --- a/sdk/dotnet/GetUser.cs +++ b/sdk/dotnet/GetUser.cs @@ -169,6 +169,10 @@ public GetUserInvokeArgs() [OutputType] public sealed class GetUserResult { + /// + /// identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com`. + /// + public readonly string AclPrincipalId; /// /// Alphanumeric representation of user local name. e.g. `mr_foo`. /// @@ -202,6 +206,8 @@ public sealed class GetUserResult [OutputConstructor] private GetUserResult( + string aclPrincipalId, + string alphanumeric, string applicationId, @@ -220,6 +226,7 @@ private GetUserResult( string? userName) { + AclPrincipalId = aclPrincipalId; Alphanumeric = alphanumeric; ApplicationId = applicationId; DisplayName = displayName; diff --git a/sdk/dotnet/Grants.cs b/sdk/dotnet/Grants.cs index c7d38ccf..34b6c58d 100644 --- a/sdk/dotnet/Grants.cs +++ b/sdk/dotnet/Grants.cs @@ -18,6 +18,9 @@ public partial class Grants : global::Pulumi.CustomResource [Output("externalLocation")] public Output ExternalLocation { get; private set; } = null!; + [Output("foreignConnection")] + public Output ForeignConnection { get; private set; } = null!; + [Output("function")] public Output Function { get; private set; } = null!; @@ -100,6 +103,9 @@ public sealed class GrantsArgs : global::Pulumi.ResourceArgs [Input("externalLocation")] public Input? ExternalLocation { get; set; } + [Input("foreignConnection")] + public Input? ForeignConnection { get; set; } + [Input("function")] public Input? Function { get; set; } @@ -149,6 +155,9 @@ public sealed class GrantsState : global::Pulumi.ResourceArgs [Input("externalLocation")] public Input? ExternalLocation { get; set; } + [Input("foreignConnection")] + public Input? ForeignConnection { get; set; } + [Input("function")] public Input? Function { get; set; } diff --git a/sdk/dotnet/Inputs/AccessControlRuleSetGrantRuleArgs.cs b/sdk/dotnet/Inputs/AccessControlRuleSetGrantRuleArgs.cs index 2006beb6..910a36c1 100644 --- a/sdk/dotnet/Inputs/AccessControlRuleSetGrantRuleArgs.cs +++ b/sdk/dotnet/Inputs/AccessControlRuleSetGrantRuleArgs.cs @@ -28,9 +28,10 @@ public InputList Principals } /// - /// Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + /// Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). /// * `roles/servicePrincipal.manager` - Manager of a service principal. /// * `roles/servicePrincipal.user` - User of a service principal. + /// * `roles/group.manager` - Manager of a group. 
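The new `foreignConnection` field on `databricks.Grants` and the `connectionName` field on `databricks.Catalog` both build on the `databricks.Connection` resource introduced above. A minimal sketch of exposing a connection as a foreign catalog; the host, credentials, and names are illustrative placeholders:

```csharp
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    var mysql = new Databricks.Connection("mysql", new()
    {
        ConnectionType = "MYSQL",
        Comment = "connection to the mysql db",
        Options =
        {
            { "host", "test.mysql.database.azure.com" },
            { "port", "3306" },
            { "user", "user" },
            { "password", "password" },
        },
    });

    // Foreign catalog that surfaces the connection's data through Unity Catalog.
    var mysqlCatalog = new Databricks.Catalog("mysqlCatalog", new()
    {
        Name = "mysql",
        ConnectionName = mysql.Name,
        Comment = "foreign catalog backed by the MySQL connection",
    });
});
```

Depending on the connection type, the foreign catalog may also need source-specific settings (for example, the source database); check the catalog documentation for the source you are federating.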
/// [Input("role", required: true)] public Input Role { get; set; } = null!; diff --git a/sdk/dotnet/Inputs/AccessControlRuleSetGrantRuleGetArgs.cs b/sdk/dotnet/Inputs/AccessControlRuleSetGrantRuleGetArgs.cs index 3a0a889d..d1160a4d 100644 --- a/sdk/dotnet/Inputs/AccessControlRuleSetGrantRuleGetArgs.cs +++ b/sdk/dotnet/Inputs/AccessControlRuleSetGrantRuleGetArgs.cs @@ -28,9 +28,10 @@ public InputList Principals } /// - /// Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + /// Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). /// * `roles/servicePrincipal.manager` - Manager of a service principal. /// * `roles/servicePrincipal.user` - User of a service principal. + /// * `roles/group.manager` - Manager of a group. /// [Input("role", required: true)] public Input Role { get; set; } = null!; diff --git a/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsArgs.cs b/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsArgs.cs new file mode 100644 index 00000000..e96f4da6 --- /dev/null +++ b/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsArgs.cs @@ -0,0 +1,23 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class ExternalLocationEncryptionDetailsArgs : global::Pulumi.ResourceArgs + { + [Input("sseEncryptionDetails")] + public Input? SseEncryptionDetails { get; set; } + + public ExternalLocationEncryptionDetailsArgs() + { + } + public static new ExternalLocationEncryptionDetailsArgs Empty => new ExternalLocationEncryptionDetailsArgs(); + } +} diff --git a/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsGetArgs.cs b/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsGetArgs.cs new file mode 100644 index 00000000..0bbe5c5b --- /dev/null +++ b/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsGetArgs.cs @@ -0,0 +1,23 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class ExternalLocationEncryptionDetailsGetArgs : global::Pulumi.ResourceArgs + { + [Input("sseEncryptionDetails")] + public Input? 
SseEncryptionDetails { get; set; } + + public ExternalLocationEncryptionDetailsGetArgs() + { + } + public static new ExternalLocationEncryptionDetailsGetArgs Empty => new ExternalLocationEncryptionDetailsGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs.cs b/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs.cs new file mode 100644 index 00000000..34639e56 --- /dev/null +++ b/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs : global::Pulumi.ResourceArgs + { + [Input("algorithm")] + public Input? Algorithm { get; set; } + + [Input("awsKmsKeyArn")] + public Input? AwsKmsKeyArn { get; set; } + + public ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs() + { + } + public static new ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs Empty => new ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs(); + } +} diff --git a/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsGetArgs.cs b/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsGetArgs.cs new file mode 100644 index 00000000..17a4cead --- /dev/null +++ b/sdk/dotnet/Inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class ExternalLocationEncryptionDetailsSseEncryptionDetailsGetArgs : global::Pulumi.ResourceArgs + { + [Input("algorithm")] + public Input? Algorithm { get; set; } + + [Input("awsKmsKeyArn")] + public Input? AwsKmsKeyArn { get; set; } + + public ExternalLocationEncryptionDetailsSseEncryptionDetailsGetArgs() + { + } + public static new ExternalLocationEncryptionDetailsSseEncryptionDetailsGetArgs Empty => new ExternalLocationEncryptionDetailsSseEncryptionDetailsGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTask.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTask.cs index 5bb1485c..3dae90ca 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTask.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTask.cs @@ -13,7 +13,7 @@ namespace Pulumi.Databricks.Inputs public sealed class GetJobJobSettingsSettingsRunJobTaskArgs : global::Pulumi.InvokeArgs { [Input("jobId", required: true)] - public string JobId { get; set; } = null!; + public int JobId { get; set; } [Input("jobParameters")] private Dictionary? 
_jobParameters; diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTaskArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTaskArgs.cs index 84c9bb4c..a9723d9b 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTaskArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTaskArgs.cs @@ -13,7 +13,7 @@ namespace Pulumi.Databricks.Inputs public sealed class GetJobJobSettingsSettingsRunJobTaskInputArgs : global::Pulumi.ResourceArgs { [Input("jobId", required: true)] - public Input JobId { get; set; } = null!; + public Input JobId { get; set; } = null!; [Input("jobParameters")] private InputMap? _jobParameters; diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTask.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTask.cs index f3858422..03368b23 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTask.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTask.cs @@ -13,7 +13,7 @@ namespace Pulumi.Databricks.Inputs public sealed class GetJobJobSettingsSettingsTaskRunJobTaskArgs : global::Pulumi.InvokeArgs { [Input("jobId", required: true)] - public string JobId { get; set; } = null!; + public int JobId { get; set; } [Input("jobParameters")] private Dictionary? _jobParameters; diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.cs index bc01109a..51f95859 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.cs @@ -13,7 +13,7 @@ namespace Pulumi.Databricks.Inputs public sealed class GetJobJobSettingsSettingsTaskRunJobTaskInputArgs : global::Pulumi.ResourceArgs { [Input("jobId", required: true)] - public Input JobId { get; set; } = null!; + public Input JobId { get; set; } = null!; [Input("jobParameters")] private InputMap? _jobParameters; diff --git a/sdk/dotnet/Inputs/JobRunJobTaskArgs.cs b/sdk/dotnet/Inputs/JobRunJobTaskArgs.cs index 6ea74a8b..068eb52c 100644 --- a/sdk/dotnet/Inputs/JobRunJobTaskArgs.cs +++ b/sdk/dotnet/Inputs/JobRunJobTaskArgs.cs @@ -16,7 +16,7 @@ public sealed class JobRunJobTaskArgs : global::Pulumi.ResourceArgs /// (String) ID of the job /// [Input("jobId", required: true)] - public Input JobId { get; set; } = null!; + public Input JobId { get; set; } = null!; [Input("jobParameters")] private InputMap? _jobParameters; diff --git a/sdk/dotnet/Inputs/JobRunJobTaskGetArgs.cs b/sdk/dotnet/Inputs/JobRunJobTaskGetArgs.cs index 3fd98e39..0d4f0bd0 100644 --- a/sdk/dotnet/Inputs/JobRunJobTaskGetArgs.cs +++ b/sdk/dotnet/Inputs/JobRunJobTaskGetArgs.cs @@ -16,7 +16,7 @@ public sealed class JobRunJobTaskGetArgs : global::Pulumi.ResourceArgs /// (String) ID of the job /// [Input("jobId", required: true)] - public Input JobId { get; set; } = null!; + public Input JobId { get; set; } = null!; [Input("jobParameters")] private InputMap? _jobParameters; diff --git a/sdk/dotnet/Inputs/JobTaskRunJobTaskArgs.cs b/sdk/dotnet/Inputs/JobTaskRunJobTaskArgs.cs index 8972ed67..f461041f 100644 --- a/sdk/dotnet/Inputs/JobTaskRunJobTaskArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskRunJobTaskArgs.cs @@ -16,7 +16,7 @@ public sealed class JobTaskRunJobTaskArgs : global::Pulumi.ResourceArgs /// (String) ID of the job /// [Input("jobId", required: true)] - public Input JobId { get; set; } = null!; + public Input JobId { get; set; } = null!; [Input("jobParameters")] private InputMap? 
_jobParameters; diff --git a/sdk/dotnet/Inputs/JobTaskRunJobTaskGetArgs.cs b/sdk/dotnet/Inputs/JobTaskRunJobTaskGetArgs.cs index 4340862a..264bbe30 100644 --- a/sdk/dotnet/Inputs/JobTaskRunJobTaskGetArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskRunJobTaskGetArgs.cs @@ -16,7 +16,7 @@ public sealed class JobTaskRunJobTaskGetArgs : global::Pulumi.ResourceArgs /// (String) ID of the job /// [Input("jobId", required: true)] - public Input JobId { get; set; } = null!; + public Input JobId { get; set; } = null!; [Input("jobParameters")] private InputMap? _jobParameters; diff --git a/sdk/dotnet/Inputs/MetastoreDataAccessAwsIamRoleArgs.cs b/sdk/dotnet/Inputs/MetastoreDataAccessAwsIamRoleArgs.cs index b8a3c23a..0fe4c79f 100644 --- a/sdk/dotnet/Inputs/MetastoreDataAccessAwsIamRoleArgs.cs +++ b/sdk/dotnet/Inputs/MetastoreDataAccessAwsIamRoleArgs.cs @@ -15,7 +15,7 @@ public sealed class MetastoreDataAccessAwsIamRoleArgs : global::Pulumi.ResourceA /// /// The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` /// - /// `azure_service_principal` optional configuration block for credential details for Azure: + /// `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): /// [Input("roleArn", required: true)] public Input RoleArn { get; set; } = null!; diff --git a/sdk/dotnet/Inputs/MetastoreDataAccessAwsIamRoleGetArgs.cs b/sdk/dotnet/Inputs/MetastoreDataAccessAwsIamRoleGetArgs.cs index 0a39a54e..b57f5544 100644 --- a/sdk/dotnet/Inputs/MetastoreDataAccessAwsIamRoleGetArgs.cs +++ b/sdk/dotnet/Inputs/MetastoreDataAccessAwsIamRoleGetArgs.cs @@ -15,7 +15,7 @@ public sealed class MetastoreDataAccessAwsIamRoleGetArgs : global::Pulumi.Resour /// /// The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` /// - /// `azure_service_principal` optional configuration block for credential details for Azure: + /// `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): /// [Input("roleArn", required: true)] public Input RoleArn { get; set; } = null!; diff --git a/sdk/dotnet/Inputs/MetastoreDataAccessAzureServicePrincipalArgs.cs b/sdk/dotnet/Inputs/MetastoreDataAccessAzureServicePrincipalArgs.cs index 0de8018c..1e3ff7b2 100644 --- a/sdk/dotnet/Inputs/MetastoreDataAccessAzureServicePrincipalArgs.cs +++ b/sdk/dotnet/Inputs/MetastoreDataAccessAzureServicePrincipalArgs.cs @@ -23,8 +23,6 @@ public sealed class MetastoreDataAccessAzureServicePrincipalArgs : global::Pulum /// /// The client secret generated for the above app ID in AAD. **This field is redacted on output** - /// - /// `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: /// public Input? ClientSecret { diff --git a/sdk/dotnet/Inputs/MetastoreDataAccessAzureServicePrincipalGetArgs.cs b/sdk/dotnet/Inputs/MetastoreDataAccessAzureServicePrincipalGetArgs.cs index 2465b847..cc48e596 100644 --- a/sdk/dotnet/Inputs/MetastoreDataAccessAzureServicePrincipalGetArgs.cs +++ b/sdk/dotnet/Inputs/MetastoreDataAccessAzureServicePrincipalGetArgs.cs @@ -23,8 +23,6 @@ public sealed class MetastoreDataAccessAzureServicePrincipalGetArgs : global::Pu /// /// The client secret generated for the above app ID in AAD. 
**This field is redacted on output** - /// - /// `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: /// public Input? ClientSecret { diff --git a/sdk/dotnet/Inputs/MetastoreDataAccessDatabricksGcpServiceAccountArgs.cs b/sdk/dotnet/Inputs/MetastoreDataAccessDatabricksGcpServiceAccountArgs.cs index 9f0b035f..13f60e7c 100644 --- a/sdk/dotnet/Inputs/MetastoreDataAccessDatabricksGcpServiceAccountArgs.cs +++ b/sdk/dotnet/Inputs/MetastoreDataAccessDatabricksGcpServiceAccountArgs.cs @@ -14,6 +14,8 @@ public sealed class MetastoreDataAccessDatabricksGcpServiceAccountArgs : global: { /// /// The email of the GCP service account created, to be granted access to relevant buckets. + /// + /// `azure_service_principal` optional configuration block for credential details for Azure (Legacy): /// [Input("email")] public Input? Email { get; set; } diff --git a/sdk/dotnet/Inputs/MetastoreDataAccessDatabricksGcpServiceAccountGetArgs.cs b/sdk/dotnet/Inputs/MetastoreDataAccessDatabricksGcpServiceAccountGetArgs.cs index 5a1a0770..42fb2b70 100644 --- a/sdk/dotnet/Inputs/MetastoreDataAccessDatabricksGcpServiceAccountGetArgs.cs +++ b/sdk/dotnet/Inputs/MetastoreDataAccessDatabricksGcpServiceAccountGetArgs.cs @@ -14,6 +14,8 @@ public sealed class MetastoreDataAccessDatabricksGcpServiceAccountGetArgs : glob { /// /// The email of the GCP service account created, to be granted access to relevant buckets. + /// + /// `azure_service_principal` optional configuration block for credential details for Azure (Legacy): /// [Input("email")] public Input? Email { get; set; } diff --git a/sdk/dotnet/Inputs/MetastoreDataAccessGcpServiceAccountKeyArgs.cs b/sdk/dotnet/Inputs/MetastoreDataAccessGcpServiceAccountKeyArgs.cs index d43c549b..c967bcff 100644 --- a/sdk/dotnet/Inputs/MetastoreDataAccessGcpServiceAccountKeyArgs.cs +++ b/sdk/dotnet/Inputs/MetastoreDataAccessGcpServiceAccountKeyArgs.cs @@ -14,6 +14,8 @@ public sealed class MetastoreDataAccessGcpServiceAccountKeyArgs : global::Pulumi { /// /// The email of the GCP service account created, to be granted access to relevant buckets. + /// + /// `azure_service_principal` optional configuration block for credential details for Azure (Legacy): /// [Input("email", required: true)] public Input Email { get; set; } = null!; diff --git a/sdk/dotnet/Inputs/MetastoreDataAccessGcpServiceAccountKeyGetArgs.cs b/sdk/dotnet/Inputs/MetastoreDataAccessGcpServiceAccountKeyGetArgs.cs index 6891abba..9b12b6be 100644 --- a/sdk/dotnet/Inputs/MetastoreDataAccessGcpServiceAccountKeyGetArgs.cs +++ b/sdk/dotnet/Inputs/MetastoreDataAccessGcpServiceAccountKeyGetArgs.cs @@ -14,6 +14,8 @@ public sealed class MetastoreDataAccessGcpServiceAccountKeyGetArgs : global::Pul { /// /// The email of the GCP service account created, to be granted access to relevant buckets. + /// + /// `azure_service_principal` optional configuration block for credential details for Azure (Legacy): /// [Input("email", required: true)] public Input Email { get; set; } = null!; diff --git a/sdk/dotnet/Inputs/MlflowModelTagArgs.cs b/sdk/dotnet/Inputs/MlflowModelTagArgs.cs index cc4d3cd0..b5745b01 100644 --- a/sdk/dotnet/Inputs/MlflowModelTagArgs.cs +++ b/sdk/dotnet/Inputs/MlflowModelTagArgs.cs @@ -12,11 +12,11 @@ namespace Pulumi.Databricks.Inputs public sealed class MlflowModelTagArgs : global::Pulumi.ResourceArgs { - [Input("key", required: true)] - public Input Key { get; set; } = null!; + [Input("key")] + public Input? 
Key { get; set; } - [Input("value", required: true)] - public Input Value { get; set; } = null!; + [Input("value")] + public Input? Value { get; set; } public MlflowModelTagArgs() { diff --git a/sdk/dotnet/Inputs/MlflowModelTagGetArgs.cs b/sdk/dotnet/Inputs/MlflowModelTagGetArgs.cs index 392ce418..cee91ac2 100644 --- a/sdk/dotnet/Inputs/MlflowModelTagGetArgs.cs +++ b/sdk/dotnet/Inputs/MlflowModelTagGetArgs.cs @@ -12,11 +12,11 @@ namespace Pulumi.Databricks.Inputs public sealed class MlflowModelTagGetArgs : global::Pulumi.ResourceArgs { - [Input("key", required: true)] - public Input Key { get; set; } = null!; + [Input("key")] + public Input? Key { get; set; } - [Input("value", required: true)] - public Input Value { get; set; } = null!; + [Input("value")] + public Input? Value { get; set; } public MlflowModelTagGetArgs() { diff --git a/sdk/dotnet/Inputs/MlflowWebhookHttpUrlSpecArgs.cs b/sdk/dotnet/Inputs/MlflowWebhookHttpUrlSpecArgs.cs index 97b64d45..322b01fc 100644 --- a/sdk/dotnet/Inputs/MlflowWebhookHttpUrlSpecArgs.cs +++ b/sdk/dotnet/Inputs/MlflowWebhookHttpUrlSpecArgs.cs @@ -12,21 +12,11 @@ namespace Pulumi.Databricks.Inputs public sealed class MlflowWebhookHttpUrlSpecArgs : global::Pulumi.ResourceArgs { - [Input("authorization")] - private Input? _authorization; - /// /// Value of the authorization header that should be sent in the request sent by the wehbook. It should be of the form `<auth type> <credentials>`, e.g. `Bearer <access_token>`. If set to an empty string, no authorization header will be included in the request. /// - public Input? Authorization - { - get => _authorization; - set - { - var emptySecret = Output.CreateSecret(0); - _authorization = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); - } - } + [Input("authorization")] + public Input? Authorization { get; set; } /// /// Enable/disable SSL certificate validation. Default is `true`. For self-signed certificates, this field must be `false` AND the destination server must disable certificate validation as well. For security purposes, it is encouraged to perform secret validation with the HMAC-encoded portion of the payload and acknowledge the risk associated with disabling hostname validation whereby it becomes more likely that requests can be maliciously routed to an unintended host. diff --git a/sdk/dotnet/Inputs/MlflowWebhookHttpUrlSpecGetArgs.cs b/sdk/dotnet/Inputs/MlflowWebhookHttpUrlSpecGetArgs.cs index 63db59ea..e195ea84 100644 --- a/sdk/dotnet/Inputs/MlflowWebhookHttpUrlSpecGetArgs.cs +++ b/sdk/dotnet/Inputs/MlflowWebhookHttpUrlSpecGetArgs.cs @@ -12,21 +12,11 @@ namespace Pulumi.Databricks.Inputs public sealed class MlflowWebhookHttpUrlSpecGetArgs : global::Pulumi.ResourceArgs { - [Input("authorization")] - private Input? _authorization; - /// /// Value of the authorization header that should be sent in the request sent by the wehbook. It should be of the form `<auth type> <credentials>`, e.g. `Bearer <access_token>`. If set to an empty string, no authorization header will be included in the request. /// - public Input? Authorization - { - get => _authorization; - set - { - var emptySecret = Output.CreateSecret(0); - _authorization = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); - } - } + [Input("authorization")] + public Input? Authorization { get; set; } /// /// Enable/disable SSL certificate validation. Default is `true`. For self-signed certificates, this field must be `false` AND the destination server must disable certificate validation as well. 
For security purposes, it is encouraged to perform secret validation with the HMAC-encoded portion of the payload and acknowledge the risk associated with disabling hostname validation whereby it becomes more likely that requests can be maliciously routed to an unintended host. diff --git a/sdk/dotnet/Inputs/MlflowWebhookJobSpecArgs.cs b/sdk/dotnet/Inputs/MlflowWebhookJobSpecArgs.cs index a8d2329c..8f74b366 100644 --- a/sdk/dotnet/Inputs/MlflowWebhookJobSpecArgs.cs +++ b/sdk/dotnet/Inputs/MlflowWebhookJobSpecArgs.cs @@ -12,21 +12,11 @@ namespace Pulumi.Databricks.Inputs public sealed class MlflowWebhookJobSpecArgs : global::Pulumi.ResourceArgs { - [Input("accessToken", required: true)] - private Input? _accessToken; - /// /// The personal access token used to authorize webhook's job runs. /// - public Input? AccessToken - { - get => _accessToken; - set - { - var emptySecret = Output.CreateSecret(0); - _accessToken = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); - } - } + [Input("accessToken", required: true)] + public Input AccessToken { get; set; } = null!; /// /// ID of the Databricks job that the webhook runs. diff --git a/sdk/dotnet/Inputs/MlflowWebhookJobSpecGetArgs.cs b/sdk/dotnet/Inputs/MlflowWebhookJobSpecGetArgs.cs index d00c6f45..4c9f4bb4 100644 --- a/sdk/dotnet/Inputs/MlflowWebhookJobSpecGetArgs.cs +++ b/sdk/dotnet/Inputs/MlflowWebhookJobSpecGetArgs.cs @@ -12,21 +12,11 @@ namespace Pulumi.Databricks.Inputs public sealed class MlflowWebhookJobSpecGetArgs : global::Pulumi.ResourceArgs { - [Input("accessToken", required: true)] - private Input? _accessToken; - /// /// The personal access token used to authorize webhook's job runs. /// - public Input? AccessToken - { - get => _accessToken; - set - { - var emptySecret = Output.CreateSecret(0); - _accessToken = Output.Tuple?, int>(value, emptySecret).Apply(t => t.Item1); - } - } + [Input("accessToken", required: true)] + public Input AccessToken { get; set; } = null!; /// /// ID of the Databricks job that the webhook runs. diff --git a/sdk/dotnet/Inputs/ModelServingConfigServedModelArgs.cs b/sdk/dotnet/Inputs/ModelServingConfigServedModelArgs.cs index d4681788..ecd0e21a 100644 --- a/sdk/dotnet/Inputs/ModelServingConfigServedModelArgs.cs +++ b/sdk/dotnet/Inputs/ModelServingConfigServedModelArgs.cs @@ -14,12 +14,19 @@ public sealed class ModelServingConfigServedModelArgs : global::Pulumi.ResourceA { [Input("environmentVars")] private InputMap? _environmentVars; + + /// + /// a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + /// public InputMap EnvironmentVars { get => _environmentVars ?? (_environmentVars = new InputMap()); set => _environmentVars = value; } + /// + /// ARN of the instance profile that the served model will use to access AWS resources. + /// [Input("instanceProfileArn")] public Input? InstanceProfileArn { get; set; } diff --git a/sdk/dotnet/Inputs/ModelServingConfigServedModelGetArgs.cs b/sdk/dotnet/Inputs/ModelServingConfigServedModelGetArgs.cs index a17f3f66..ec66cddd 100644 --- a/sdk/dotnet/Inputs/ModelServingConfigServedModelGetArgs.cs +++ b/sdk/dotnet/Inputs/ModelServingConfigServedModelGetArgs.cs @@ -14,12 +14,19 @@ public sealed class ModelServingConfigServedModelGetArgs : global::Pulumi.Resour { [Input("environmentVars")] private InputMap? 
_environmentVars; + + /// + /// a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + /// public InputMap EnvironmentVars { get => _environmentVars ?? (_environmentVars = new InputMap()); set => _environmentVars = value; } + /// + /// ARN of the instance profile that the served model will use to access AWS resources. + /// [Input("instanceProfileArn")] public Input? InstanceProfileArn { get; set; } diff --git a/sdk/dotnet/Inputs/MwsCustomerManagedKeysGcpKeyInfoArgs.cs b/sdk/dotnet/Inputs/MwsCustomerManagedKeysGcpKeyInfoArgs.cs index b71c8b81..035bcc3a 100644 --- a/sdk/dotnet/Inputs/MwsCustomerManagedKeysGcpKeyInfoArgs.cs +++ b/sdk/dotnet/Inputs/MwsCustomerManagedKeysGcpKeyInfoArgs.cs @@ -12,6 +12,9 @@ namespace Pulumi.Databricks.Inputs public sealed class MwsCustomerManagedKeysGcpKeyInfoArgs : global::Pulumi.ResourceArgs { + /// + /// The GCP KMS key's resource name. + /// [Input("kmsKeyId", required: true)] public Input KmsKeyId { get; set; } = null!; diff --git a/sdk/dotnet/Inputs/MwsCustomerManagedKeysGcpKeyInfoGetArgs.cs b/sdk/dotnet/Inputs/MwsCustomerManagedKeysGcpKeyInfoGetArgs.cs index 58a8d517..90e142ee 100644 --- a/sdk/dotnet/Inputs/MwsCustomerManagedKeysGcpKeyInfoGetArgs.cs +++ b/sdk/dotnet/Inputs/MwsCustomerManagedKeysGcpKeyInfoGetArgs.cs @@ -12,6 +12,9 @@ namespace Pulumi.Databricks.Inputs public sealed class MwsCustomerManagedKeysGcpKeyInfoGetArgs : global::Pulumi.ResourceArgs { + /// + /// The GCP KMS key's resource name. + /// [Input("kmsKeyId", required: true)] public Input KmsKeyId { get; set; } = null!; diff --git a/sdk/dotnet/Inputs/MwsWorkspacesTokenArgs.cs b/sdk/dotnet/Inputs/MwsWorkspacesTokenArgs.cs index feaf71c3..d5591c25 100644 --- a/sdk/dotnet/Inputs/MwsWorkspacesTokenArgs.cs +++ b/sdk/dotnet/Inputs/MwsWorkspacesTokenArgs.cs @@ -15,6 +15,9 @@ public sealed class MwsWorkspacesTokenArgs : global::Pulumi.ResourceArgs [Input("comment")] public Input? Comment { get; set; } + /// + /// Token expiry lifetime. By default its 2592000 (30 days). + /// [Input("lifetimeSeconds")] public Input? LifetimeSeconds { get; set; } diff --git a/sdk/dotnet/Inputs/MwsWorkspacesTokenGetArgs.cs b/sdk/dotnet/Inputs/MwsWorkspacesTokenGetArgs.cs index 5f0aa7f9..c82d5200 100644 --- a/sdk/dotnet/Inputs/MwsWorkspacesTokenGetArgs.cs +++ b/sdk/dotnet/Inputs/MwsWorkspacesTokenGetArgs.cs @@ -15,6 +15,9 @@ public sealed class MwsWorkspacesTokenGetArgs : global::Pulumi.ResourceArgs [Input("comment")] public Input? Comment { get; set; } + /// + /// Token expiry lifetime. By default its 2592000 (30 days). + /// [Input("lifetimeSeconds")] public Input? LifetimeSeconds { get; set; } diff --git a/sdk/dotnet/Inputs/SqlTableColumnArgs.cs b/sdk/dotnet/Inputs/SqlTableColumnArgs.cs index b0764ccb..d13839c2 100644 --- a/sdk/dotnet/Inputs/SqlTableColumnArgs.cs +++ b/sdk/dotnet/Inputs/SqlTableColumnArgs.cs @@ -31,10 +31,10 @@ public sealed class SqlTableColumnArgs : global::Pulumi.ResourceArgs public Input? Nullable { get; set; } /// - /// Column type spec (with metadata) as SQL text + /// Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. /// - [Input("type", required: true)] - public Input Type { get; set; } = null!; + [Input("type")] + public Input? 
Type { get; set; } public SqlTableColumnArgs() { diff --git a/sdk/dotnet/Inputs/SqlTableColumnGetArgs.cs b/sdk/dotnet/Inputs/SqlTableColumnGetArgs.cs index 22745a22..36cf9f50 100644 --- a/sdk/dotnet/Inputs/SqlTableColumnGetArgs.cs +++ b/sdk/dotnet/Inputs/SqlTableColumnGetArgs.cs @@ -31,10 +31,10 @@ public sealed class SqlTableColumnGetArgs : global::Pulumi.ResourceArgs public Input? Nullable { get; set; } /// - /// Column type spec (with metadata) as SQL text + /// Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. /// - [Input("type", required: true)] - public Input Type { get; set; } = null!; + [Input("type")] + public Input? Type { get; set; } public SqlTableColumnGetArgs() { diff --git a/sdk/dotnet/Metastore.cs b/sdk/dotnet/Metastore.cs index 2996718a..1215d81e 100644 --- a/sdk/dotnet/Metastore.cs +++ b/sdk/dotnet/Metastore.cs @@ -10,9 +10,6 @@ namespace Pulumi.Databricks { /// - /// > **Notes** - /// Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. - /// /// A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. /// /// Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). @@ -67,6 +64,9 @@ public partial class Metastore : global::Pulumi.CustomResource [Output("globalMetastoreId")] public Output GlobalMetastoreId { get; private set; } = null!; + [Output("metastoreId")] + public Output MetastoreId { get; private set; } = null!; + /// /// Name of metastore. /// @@ -79,6 +79,9 @@ public partial class Metastore : global::Pulumi.CustomResource [Output("owner")] public Output Owner { get; private set; } = null!; + /// + /// The region of the metastore + /// [Output("region")] public Output Region { get; private set; } = null!; @@ -88,6 +91,9 @@ public partial class Metastore : global::Pulumi.CustomResource [Output("storageRoot")] public Output StorageRoot { get; private set; } = null!; + [Output("storageRootCredentialId")] + public Output StorageRootCredentialId { get; private set; } = null!; + [Output("updatedAt")] public Output UpdatedAt { get; private set; } = null!; @@ -179,6 +185,9 @@ public sealed class MetastoreArgs : global::Pulumi.ResourceArgs [Input("globalMetastoreId")] public Input? GlobalMetastoreId { get; set; } + [Input("metastoreId")] + public Input? MetastoreId { get; set; } + /// /// Name of metastore. /// @@ -191,6 +200,9 @@ public sealed class MetastoreArgs : global::Pulumi.ResourceArgs [Input("owner")] public Input? Owner { get; set; } + /// + /// The region of the metastore + /// [Input("region")] public Input? Region { get; set; } @@ -200,6 +212,9 @@ public sealed class MetastoreArgs : global::Pulumi.ResourceArgs [Input("storageRoot", required: true)] public Input StorageRoot { get; set; } = null!; + [Input("storageRootCredentialId")] + public Input? StorageRootCredentialId { get; set; } + [Input("updatedAt")] public Input? UpdatedAt { get; set; } @@ -253,6 +268,9 @@ public sealed class MetastoreState : global::Pulumi.ResourceArgs [Input("globalMetastoreId")] public Input? GlobalMetastoreId { get; set; } + [Input("metastoreId")] + public Input? MetastoreId { get; set; } + /// /// Name of metastore. 
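With the metastore changes above, `region` can now be set (and read back) directly on the resource, and `storageRootCredentialId` can pin the root storage to an existing storage credential. A minimal sketch with placeholder bucket, region, and owner values:

```csharp
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    var thisMetastore = new Databricks.Metastore("thisMetastore", new()
    {
        Name = "primary",
        StorageRoot = "s3://my-metastore-bucket/metastore", // placeholder bucket
        Region = "us-east-1",                               // region the metastore lives in
        Owner = "uc admins",
        ForceDestroy = true,
        // Optional: reference an existing storage credential for the root location.
        // The id below is a placeholder.
        // StorageRootCredentialId = "12345678-1234-1234-1234-123456789012",
    });
});
```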
/// @@ -265,6 +283,9 @@ public sealed class MetastoreState : global::Pulumi.ResourceArgs [Input("owner")] public Input? Owner { get; set; } + /// + /// The region of the metastore + /// [Input("region")] public Input? Region { get; set; } @@ -274,6 +295,9 @@ public sealed class MetastoreState : global::Pulumi.ResourceArgs [Input("storageRoot")] public Input? StorageRoot { get; set; } + [Input("storageRootCredentialId")] + public Input? StorageRootCredentialId { get; set; } + [Input("updatedAt")] public Input? UpdatedAt { get; set; } diff --git a/sdk/dotnet/MetastoreAssignment.cs b/sdk/dotnet/MetastoreAssignment.cs index f416506d..012de42b 100644 --- a/sdk/dotnet/MetastoreAssignment.cs +++ b/sdk/dotnet/MetastoreAssignment.cs @@ -26,6 +26,7 @@ namespace Pulumi.Databricks /// { /// StorageRoot = $"s3://{aws_s3_bucket.Metastore.Id}/metastore", /// Owner = "uc admins", + /// Region = "us-east-1", /// ForceDestroy = true, /// }); /// diff --git a/sdk/dotnet/MlflowModel.cs b/sdk/dotnet/MlflowModel.cs index affe137c..c182a2b1 100644 --- a/sdk/dotnet/MlflowModel.cs +++ b/sdk/dotnet/MlflowModel.cs @@ -70,7 +70,7 @@ namespace Pulumi.Databricks public partial class MlflowModel : global::Pulumi.CustomResource { [Output("creationTimestamp")] - public Output CreationTimestamp { get; private set; } = null!; + public Output CreationTimestamp { get; private set; } = null!; /// /// The description of the MLflow model. @@ -79,7 +79,7 @@ public partial class MlflowModel : global::Pulumi.CustomResource public Output Description { get; private set; } = null!; [Output("lastUpdatedTimestamp")] - public Output LastUpdatedTimestamp { get; private set; } = null!; + public Output LastUpdatedTimestamp { get; private set; } = null!; /// /// Name of MLflow model. Change of name triggers new resource. @@ -87,9 +87,6 @@ public partial class MlflowModel : global::Pulumi.CustomResource [Output("name")] public Output Name { get; private set; } = null!; - [Output("registeredModelId")] - public Output RegisteredModelId { get; private set; } = null!; - /// /// Tags for the MLflow model. /// @@ -97,7 +94,7 @@ public partial class MlflowModel : global::Pulumi.CustomResource public Output> Tags { get; private set; } = null!; [Output("userId")] - public Output UserId { get; private set; } = null!; + public Output UserId { get; private set; } = null!; /// @@ -163,9 +160,6 @@ public sealed class MlflowModelArgs : global::Pulumi.ResourceArgs [Input("name")] public Input? Name { get; set; } - [Input("registeredModelId")] - public Input? RegisteredModelId { get; set; } - [Input("tags")] private InputList? _tags; @@ -207,9 +201,6 @@ public sealed class MlflowModelState : global::Pulumi.ResourceArgs [Input("name")] public Input? Name { get; set; } - [Input("registeredModelId")] - public Input? RegisteredModelId { get; set; } - [Input("tags")] private InputList? _tags; diff --git a/sdk/dotnet/MwsCustomerManagedKeys.cs b/sdk/dotnet/MwsCustomerManagedKeys.cs index 79e285cc..99e51d61 100644 --- a/sdk/dotnet/MwsCustomerManagedKeys.cs +++ b/sdk/dotnet/MwsCustomerManagedKeys.cs @@ -13,9 +13,11 @@ namespace Pulumi.Databricks /// ## Example Usage /// /// > **Note** If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour. 
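For configurations created before `useCases` existed, the note above boils down to adding the list explicitly. A minimal sketch for an AWS-backed key; the key ARN and alias are placeholders, and the `AwsKeyInfo` argument names are taken from the resource's `aws_key_info` block (`key_arn`, `key_alias`):

```csharp
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    var config = new Config();
    var databricksAccountId = config.RequireObject<dynamic>("databricksAccountId");

    var managedServices = new Databricks.MwsCustomerManagedKeys("managedServices", new()
    {
        AccountId = databricksAccountId,
        AwsKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysAwsKeyInfoArgs
        {
            KeyArn = "arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000", // placeholder
            KeyAlias = "alias/databricks-managed-services",                                          // placeholder
        },
        // Keeps the pre-`useCases` behaviour described in the note above.
        UseCases = new[]
        {
            "MANAGED_SERVICES",
        },
    });
});
```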
+ /// /// ### Customer-managed key for managed services /// /// You must configure this during workspace creation + /// ### For AWS /// /// ```csharp /// using System.Collections.Generic; @@ -113,7 +115,36 @@ namespace Pulumi.Databricks /// /// }); /// ``` + /// ### For GCP + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var config = new Config(); + /// var databricksAccountId = config.RequireObject<dynamic>("databricksAccountId"); + /// var cmekResourceId = config.RequireObject<dynamic>("cmekResourceId"); + /// var managedServices = new Databricks.MwsCustomerManagedKeys("managedServices", new() + /// { + /// AccountId = databricksAccountId, + /// GcpKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysGcpKeyInfoArgs + /// { + /// KmsKeyId = cmekResourceId, + /// }, + /// UseCases = new[] + /// { + /// "MANAGED_SERVICES", + /// }, + /// }); + /// + /// }); + /// ``` /// ### Customer-managed key for workspace storage + /// ### For AWS /// /// ```csharp /// using System.Collections.Generic; @@ -290,6 +321,34 @@ namespace Pulumi.Databricks /// /// }); /// ``` + /// ### For GCP + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Databricks = Pulumi.Databricks; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var config = new Config(); + /// var databricksAccountId = config.RequireObject<dynamic>("databricksAccountId"); + /// var cmekResourceId = config.RequireObject<dynamic>("cmekResourceId"); + /// var storage = new Databricks.MwsCustomerManagedKeys("storage", new() + /// { + /// AccountId = databricksAccountId, + /// GcpKeyInfo = new Databricks.Inputs.MwsCustomerManagedKeysGcpKeyInfoArgs + /// { + /// KmsKeyId = cmekResourceId, + /// }, + /// UseCases = new[] + /// { + /// "STORAGE", + /// }, + /// }); + /// + /// }); + /// ``` /// ## Related Resources /// /// The following resources are used in the same context: @@ -315,7 +374,7 @@ public partial class MwsCustomerManagedKeys : global::Pulumi.CustomResource public Output AccountId { get; private set; } = null!; /// - /// This field is a block and is documented below. + /// This field is a block and is documented below. This conflicts with `gcp_key_info` /// [Output("awsKeyInfo")] public Output AwsKeyInfo { get; private set; } = null!; @@ -332,6 +391,9 @@ public partial class MwsCustomerManagedKeys : global::Pulumi.CustomResource [Output("customerManagedKeyId")] public Output CustomerManagedKeyId { get; private set; } = null!; + /// + /// This field is a block and is documented below. This conflicts with `aws_key_info` + /// [Output("gcpKeyInfo")] public Output GcpKeyInfo { get; private set; } = null!; @@ -394,7 +456,7 @@ public sealed class MwsCustomerManagedKeysArgs : global::Pulumi.ResourceArgs public Input AccountId { get; set; } = null!; /// - /// This field is a block and is documented below. + /// This field is a block and is documented below. This conflicts with `gcp_key_info` /// [Input("awsKeyInfo")] public Input? AwsKeyInfo { get; set; } @@ -411,6 +473,9 @@ public sealed class MwsCustomerManagedKeysArgs : global::Pulumi.ResourceArgs [Input("customerManagedKeyId")] public Input? CustomerManagedKeyId { get; set; } + /// + /// This field is a block and is documented below. This conflicts with `aws_key_info` + /// [Input("gcpKeyInfo")] public Input? 
GcpKeyInfo { get; set; } @@ -441,7 +506,7 @@ public sealed class MwsCustomerManagedKeysState : global::Pulumi.ResourceArgs public Input? AccountId { get; set; } /// - /// This field is a block and is documented below. + /// This field is a block and is documented below. This conflicts with `gcp_key_info` /// [Input("awsKeyInfo")] public Input? AwsKeyInfo { get; set; } @@ -458,6 +523,9 @@ public sealed class MwsCustomerManagedKeysState : global::Pulumi.ResourceArgs [Input("customerManagedKeyId")] public Input? CustomerManagedKeyId { get; set; } + /// + /// This field is a block and is documented below. This conflicts with `aws_key_info` + /// [Input("gcpKeyInfo")] public Input? GcpKeyInfo { get; set; } diff --git a/sdk/dotnet/MwsWorkspaces.cs b/sdk/dotnet/MwsWorkspaces.cs index d9a13acd..95310ddb 100644 --- a/sdk/dotnet/MwsWorkspaces.cs +++ b/sdk/dotnet/MwsWorkspaces.cs @@ -104,6 +104,9 @@ public partial class MwsWorkspaces : global::Pulumi.CustomResource [Output("storageConfigurationId")] public Output StorageConfigurationId { get; private set; } = null!; + /// + /// `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + /// [Output("storageCustomerManagedKeyId")] public Output StorageCustomerManagedKeyId { get; private set; } = null!; @@ -287,6 +290,9 @@ public Input? AccountId [Input("storageConfigurationId")] public Input? StorageConfigurationId { get; set; } + /// + /// `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + /// [Input("storageCustomerManagedKeyId")] public Input? StorageCustomerManagedKeyId { get; set; } @@ -428,6 +434,9 @@ public Input? AccountId [Input("storageConfigurationId")] public Input? StorageConfigurationId { get; set; } + /// + /// `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + /// [Input("storageCustomerManagedKeyId")] public Input? StorageCustomerManagedKeyId { get; set; } diff --git a/sdk/dotnet/Outputs/AccessControlRuleSetGrantRule.cs b/sdk/dotnet/Outputs/AccessControlRuleSetGrantRule.cs index 76ae3020..cc86e465 100644 --- a/sdk/dotnet/Outputs/AccessControlRuleSetGrantRule.cs +++ b/sdk/dotnet/Outputs/AccessControlRuleSetGrantRule.cs @@ -21,9 +21,10 @@ public sealed class AccessControlRuleSetGrantRule /// public readonly ImmutableArray Principals; /// - /// Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + /// Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). /// * `roles/servicePrincipal.manager` - Manager of a service principal. /// * `roles/servicePrincipal.user` - User of a service principal. + /// * `roles/group.manager` - Manager of a group. 
/// public readonly string Role; diff --git a/sdk/dotnet/Outputs/ExternalLocationEncryptionDetails.cs b/sdk/dotnet/Outputs/ExternalLocationEncryptionDetails.cs new file mode 100644 index 00000000..dceb8408 --- /dev/null +++ b/sdk/dotnet/Outputs/ExternalLocationEncryptionDetails.cs @@ -0,0 +1,24 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class ExternalLocationEncryptionDetails + { + public readonly Outputs.ExternalLocationEncryptionDetailsSseEncryptionDetails? SseEncryptionDetails; + + [OutputConstructor] + private ExternalLocationEncryptionDetails(Outputs.ExternalLocationEncryptionDetailsSseEncryptionDetails? sseEncryptionDetails) + { + SseEncryptionDetails = sseEncryptionDetails; + } + } +} diff --git a/sdk/dotnet/Outputs/ExternalLocationEncryptionDetailsSseEncryptionDetails.cs b/sdk/dotnet/Outputs/ExternalLocationEncryptionDetailsSseEncryptionDetails.cs new file mode 100644 index 00000000..d0bc31ba --- /dev/null +++ b/sdk/dotnet/Outputs/ExternalLocationEncryptionDetailsSseEncryptionDetails.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class ExternalLocationEncryptionDetailsSseEncryptionDetails + { + public readonly string? Algorithm; + public readonly string? AwsKmsKeyArn; + + [OutputConstructor] + private ExternalLocationEncryptionDetailsSseEncryptionDetails( + string? algorithm, + + string? awsKmsKeyArn) + { + Algorithm = algorithm; + AwsKmsKeyArn = awsKmsKeyArn; + } + } +} diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsRunJobTaskResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsRunJobTaskResult.cs index dd8a7475..18922f92 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsRunJobTaskResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsRunJobTaskResult.cs @@ -13,12 +13,12 @@ namespace Pulumi.Databricks.Outputs [OutputType] public sealed class GetJobJobSettingsSettingsRunJobTaskResult { - public readonly string JobId; + public readonly int JobId; public readonly ImmutableDictionary? JobParameters; [OutputConstructor] private GetJobJobSettingsSettingsRunJobTaskResult( - string jobId, + int jobId, ImmutableDictionary? jobParameters) { diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskRunJobTaskResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskRunJobTaskResult.cs index 433ff661..76242060 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskRunJobTaskResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskRunJobTaskResult.cs @@ -13,12 +13,12 @@ namespace Pulumi.Databricks.Outputs [OutputType] public sealed class GetJobJobSettingsSettingsTaskRunJobTaskResult { - public readonly string JobId; + public readonly int JobId; public readonly ImmutableDictionary? JobParameters; [OutputConstructor] private GetJobJobSettingsSettingsTaskRunJobTaskResult( - string jobId, + int jobId, ImmutableDictionary? 
jobParameters) { diff --git a/sdk/dotnet/Outputs/JobRunJobTask.cs b/sdk/dotnet/Outputs/JobRunJobTask.cs index c75f9aee..93f284f6 100644 --- a/sdk/dotnet/Outputs/JobRunJobTask.cs +++ b/sdk/dotnet/Outputs/JobRunJobTask.cs @@ -16,7 +16,7 @@ public sealed class JobRunJobTask /// /// (String) ID of the job /// - public readonly string JobId; + public readonly int JobId; /// /// (Map) Job parameters for the task /// @@ -24,7 +24,7 @@ public sealed class JobRunJobTask [OutputConstructor] private JobRunJobTask( - string jobId, + int jobId, ImmutableDictionary? jobParameters) { diff --git a/sdk/dotnet/Outputs/JobTaskRunJobTask.cs b/sdk/dotnet/Outputs/JobTaskRunJobTask.cs index ac74cb9e..9da05f3f 100644 --- a/sdk/dotnet/Outputs/JobTaskRunJobTask.cs +++ b/sdk/dotnet/Outputs/JobTaskRunJobTask.cs @@ -16,7 +16,7 @@ public sealed class JobTaskRunJobTask /// /// (String) ID of the job /// - public readonly string JobId; + public readonly int JobId; /// /// (Map) Job parameters for the task /// @@ -24,7 +24,7 @@ public sealed class JobTaskRunJobTask [OutputConstructor] private JobTaskRunJobTask( - string jobId, + int jobId, ImmutableDictionary? jobParameters) { diff --git a/sdk/dotnet/Outputs/MetastoreDataAccessAwsIamRole.cs b/sdk/dotnet/Outputs/MetastoreDataAccessAwsIamRole.cs index b689113a..97dab4aa 100644 --- a/sdk/dotnet/Outputs/MetastoreDataAccessAwsIamRole.cs +++ b/sdk/dotnet/Outputs/MetastoreDataAccessAwsIamRole.cs @@ -16,7 +16,7 @@ public sealed class MetastoreDataAccessAwsIamRole /// /// The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` /// - /// `azure_service_principal` optional configuration block for credential details for Azure: + /// `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): /// public readonly string RoleArn; diff --git a/sdk/dotnet/Outputs/MetastoreDataAccessAzureServicePrincipal.cs b/sdk/dotnet/Outputs/MetastoreDataAccessAzureServicePrincipal.cs index f4bb0780..515f762e 100644 --- a/sdk/dotnet/Outputs/MetastoreDataAccessAzureServicePrincipal.cs +++ b/sdk/dotnet/Outputs/MetastoreDataAccessAzureServicePrincipal.cs @@ -19,8 +19,6 @@ public sealed class MetastoreDataAccessAzureServicePrincipal public readonly string ApplicationId; /// /// The client secret generated for the above app ID in AAD. **This field is redacted on output** - /// - /// `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: /// public readonly string ClientSecret; /// diff --git a/sdk/dotnet/Outputs/MetastoreDataAccessDatabricksGcpServiceAccount.cs b/sdk/dotnet/Outputs/MetastoreDataAccessDatabricksGcpServiceAccount.cs index ac23c103..8aaab037 100644 --- a/sdk/dotnet/Outputs/MetastoreDataAccessDatabricksGcpServiceAccount.cs +++ b/sdk/dotnet/Outputs/MetastoreDataAccessDatabricksGcpServiceAccount.cs @@ -15,6 +15,8 @@ public sealed class MetastoreDataAccessDatabricksGcpServiceAccount { /// /// The email of the GCP service account created, to be granted access to relevant buckets. + /// + /// `azure_service_principal` optional configuration block for credential details for Azure (Legacy): /// public readonly string? 
Email; diff --git a/sdk/dotnet/Outputs/MetastoreDataAccessGcpServiceAccountKey.cs b/sdk/dotnet/Outputs/MetastoreDataAccessGcpServiceAccountKey.cs index ae3cdb32..dc3b9af2 100644 --- a/sdk/dotnet/Outputs/MetastoreDataAccessGcpServiceAccountKey.cs +++ b/sdk/dotnet/Outputs/MetastoreDataAccessGcpServiceAccountKey.cs @@ -15,6 +15,8 @@ public sealed class MetastoreDataAccessGcpServiceAccountKey { /// /// The email of the GCP service account created, to be granted access to relevant buckets. + /// + /// `azure_service_principal` optional configuration block for credential details for Azure (Legacy): /// public readonly string Email; public readonly string PrivateKey; diff --git a/sdk/dotnet/Outputs/MlflowModelTag.cs b/sdk/dotnet/Outputs/MlflowModelTag.cs index b5d8e2a7..5e5e7097 100644 --- a/sdk/dotnet/Outputs/MlflowModelTag.cs +++ b/sdk/dotnet/Outputs/MlflowModelTag.cs @@ -13,14 +13,14 @@ namespace Pulumi.Databricks.Outputs [OutputType] public sealed class MlflowModelTag { - public readonly string Key; - public readonly string Value; + public readonly string? Key; + public readonly string? Value; [OutputConstructor] private MlflowModelTag( - string key, + string? key, - string value) + string? value) { Key = key; Value = value; diff --git a/sdk/dotnet/Outputs/ModelServingConfigServedModel.cs b/sdk/dotnet/Outputs/ModelServingConfigServedModel.cs index 24d0c170..572dad57 100644 --- a/sdk/dotnet/Outputs/ModelServingConfigServedModel.cs +++ b/sdk/dotnet/Outputs/ModelServingConfigServedModel.cs @@ -13,7 +13,13 @@ namespace Pulumi.Databricks.Outputs [OutputType] public sealed class ModelServingConfigServedModel { + /// + /// a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + /// public readonly ImmutableDictionary? EnvironmentVars; + /// + /// ARN of the instance profile that the served model will use to access AWS resources. + /// public readonly string? InstanceProfileArn; /// /// The name of the model in Databricks Model Registry to be served. diff --git a/sdk/dotnet/Outputs/MwsCustomerManagedKeysGcpKeyInfo.cs b/sdk/dotnet/Outputs/MwsCustomerManagedKeysGcpKeyInfo.cs index c1226ca9..c0749f7e 100644 --- a/sdk/dotnet/Outputs/MwsCustomerManagedKeysGcpKeyInfo.cs +++ b/sdk/dotnet/Outputs/MwsCustomerManagedKeysGcpKeyInfo.cs @@ -13,6 +13,9 @@ namespace Pulumi.Databricks.Outputs [OutputType] public sealed class MwsCustomerManagedKeysGcpKeyInfo { + /// + /// The GCP KMS key's resource name. + /// public readonly string KmsKeyId; [OutputConstructor] diff --git a/sdk/dotnet/Outputs/MwsWorkspacesToken.cs b/sdk/dotnet/Outputs/MwsWorkspacesToken.cs index 13d8c8fd..4a46eb8a 100644 --- a/sdk/dotnet/Outputs/MwsWorkspacesToken.cs +++ b/sdk/dotnet/Outputs/MwsWorkspacesToken.cs @@ -14,6 +14,9 @@ namespace Pulumi.Databricks.Outputs public sealed class MwsWorkspacesToken { public readonly string? Comment; + /// + /// Token expiry lifetime. By default its 2592000 (30 days). + /// public readonly int? LifetimeSeconds; public readonly string? TokenId; public readonly string? TokenValue; diff --git a/sdk/dotnet/Outputs/SqlTableColumn.cs b/sdk/dotnet/Outputs/SqlTableColumn.cs index d5d9697d..16beb93a 100644 --- a/sdk/dotnet/Outputs/SqlTableColumn.cs +++ b/sdk/dotnet/Outputs/SqlTableColumn.cs @@ -26,9 +26,9 @@ public sealed class SqlTableColumn /// public readonly bool? 
Nullable; /// - /// Column type spec (with metadata) as SQL text + /// Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. /// - public readonly string Type; + public readonly string? Type; [OutputConstructor] private SqlTableColumn( @@ -38,7 +38,7 @@ private SqlTableColumn( bool? nullable, - string type) + string? type) { Comment = comment; Name = name; diff --git a/sdk/dotnet/Share.cs b/sdk/dotnet/Share.cs index f58653c1..a7c0b435 100644 --- a/sdk/dotnet/Share.cs +++ b/sdk/dotnet/Share.cs @@ -33,6 +33,12 @@ public partial class Share : global::Pulumi.CustomResource [Output("objects")] public Output> Objects { get; private set; } = null!; + /// + /// User name/group name/sp application_id of the share owner. + /// + [Output("owner")] + public Output Owner { get; private set; } = null!; + /// /// Create a Share resource with the given unique name, arguments, and options. @@ -105,6 +111,12 @@ public InputList Objects set => _objects = value; } + /// + /// User name/group name/sp application_id of the share owner. + /// + [Input("owner")] + public Input? Owner { get; set; } + public ShareArgs() { } @@ -139,6 +151,12 @@ public InputList Objects set => _objects = value; } + /// + /// User name/group name/sp application_id of the share owner. + /// + [Input("owner")] + public Input? Owner { get; set; } + public ShareState() { } diff --git a/sdk/dotnet/SqlAlert.cs b/sdk/dotnet/SqlAlert.cs index 088551bc..c35983d8 100644 --- a/sdk/dotnet/SqlAlert.cs +++ b/sdk/dotnet/SqlAlert.cs @@ -26,6 +26,9 @@ namespace Pulumi.Databricks [DatabricksResourceType("databricks:index/sqlAlert:SqlAlert")] public partial class SqlAlert : global::Pulumi.CustomResource { + [Output("createdAt")] + public Output CreatedAt { get; private set; } = null!; + /// /// Name of the alert. /// @@ -56,6 +59,9 @@ public partial class SqlAlert : global::Pulumi.CustomResource [Output("rearm")] public Output Rearm { get; private set; } = null!; + [Output("updatedAt")] + public Output UpdatedAt { get; private set; } = null!; + /// /// Create a SqlAlert resource with the given unique name, arguments, and options. @@ -102,6 +108,9 @@ public static SqlAlert Get(string name, Input id, SqlAlertState? state = public sealed class SqlAlertArgs : global::Pulumi.ResourceArgs { + [Input("createdAt")] + public Input? CreatedAt { get; set; } + /// /// Name of the alert. /// @@ -132,6 +141,9 @@ public sealed class SqlAlertArgs : global::Pulumi.ResourceArgs [Input("rearm")] public Input? Rearm { get; set; } + [Input("updatedAt")] + public Input? UpdatedAt { get; set; } + public SqlAlertArgs() { } @@ -140,6 +152,9 @@ public SqlAlertArgs() public sealed class SqlAlertState : global::Pulumi.ResourceArgs { + [Input("createdAt")] + public Input? CreatedAt { get; set; } + /// /// Name of the alert. /// @@ -170,6 +185,9 @@ public sealed class SqlAlertState : global::Pulumi.ResourceArgs [Input("rearm")] public Input? Rearm { get; set; } + [Input("updatedAt")] + public Input? 
UpdatedAt { get; set; } + public SqlAlertState() { } diff --git a/sdk/dotnet/SqlDashboard.cs b/sdk/dotnet/SqlDashboard.cs index 31eba3c6..9002263e 100644 --- a/sdk/dotnet/SqlDashboard.cs +++ b/sdk/dotnet/SqlDashboard.cs @@ -89,6 +89,9 @@ namespace Pulumi.Databricks [DatabricksResourceType("databricks:index/sqlDashboard:SqlDashboard")] public partial class SqlDashboard : global::Pulumi.CustomResource { + [Output("createdAt")] + public Output CreatedAt { get; private set; } = null!; + [Output("name")] public Output Name { get; private set; } = null!; @@ -98,6 +101,9 @@ public partial class SqlDashboard : global::Pulumi.CustomResource [Output("tags")] public Output> Tags { get; private set; } = null!; + [Output("updatedAt")] + public Output UpdatedAt { get; private set; } = null!; + /// /// Create a SqlDashboard resource with the given unique name, arguments, and options. @@ -144,6 +150,9 @@ public static SqlDashboard Get(string name, Input id, SqlDashboardState? public sealed class SqlDashboardArgs : global::Pulumi.ResourceArgs { + [Input("createdAt")] + public Input? CreatedAt { get; set; } + [Input("name")] public Input? Name { get; set; } @@ -158,6 +167,9 @@ public InputList Tags set => _tags = value; } + [Input("updatedAt")] + public Input? UpdatedAt { get; set; } + public SqlDashboardArgs() { } @@ -166,6 +178,9 @@ public SqlDashboardArgs() public sealed class SqlDashboardState : global::Pulumi.ResourceArgs { + [Input("createdAt")] + public Input? CreatedAt { get; set; } + [Input("name")] public Input? Name { get; set; } @@ -180,6 +195,9 @@ public InputList Tags set => _tags = value; } + [Input("updatedAt")] + public Input? UpdatedAt { get; set; } + public SqlDashboardState() { } diff --git a/sdk/dotnet/SqlQuery.cs b/sdk/dotnet/SqlQuery.cs index 71428050..77db8a6c 100644 --- a/sdk/dotnet/SqlQuery.cs +++ b/sdk/dotnet/SqlQuery.cs @@ -151,6 +151,9 @@ namespace Pulumi.Databricks [DatabricksResourceType("databricks:index/sqlQuery:SqlQuery")] public partial class SqlQuery : global::Pulumi.CustomResource { + [Output("createdAt")] + public Output CreatedAt { get; private set; } = null!; + [Output("dataSourceId")] public Output DataSourceId { get; private set; } = null!; @@ -178,6 +181,9 @@ public partial class SqlQuery : global::Pulumi.CustomResource [Output("tags")] public Output> Tags { get; private set; } = null!; + [Output("updatedAt")] + public Output UpdatedAt { get; private set; } = null!; + /// /// Create a SqlQuery resource with the given unique name, arguments, and options. @@ -224,6 +230,9 @@ public static SqlQuery Get(string name, Input id, SqlQueryState? state = public sealed class SqlQueryArgs : global::Pulumi.ResourceArgs { + [Input("createdAt")] + public Input? CreatedAt { get; set; } + [Input("dataSourceId", required: true)] public Input DataSourceId { get; set; } = null!; @@ -261,6 +270,9 @@ public InputList Tags set => _tags = value; } + [Input("updatedAt")] + public Input? UpdatedAt { get; set; } + public SqlQueryArgs() { } @@ -269,6 +281,9 @@ public SqlQueryArgs() public sealed class SqlQueryState : global::Pulumi.ResourceArgs { + [Input("createdAt")] + public Input? CreatedAt { get; set; } + [Input("dataSourceId")] public Input? DataSourceId { get; set; } @@ -306,6 +321,9 @@ public InputList Tags set => _tags = value; } + [Input("updatedAt")] + public Input? 
UpdatedAt { get; set; } + public SqlQueryState() { } diff --git a/sdk/dotnet/StorageCredential.cs b/sdk/dotnet/StorageCredential.cs index 393a97f5..f496b83b 100644 --- a/sdk/dotnet/StorageCredential.cs +++ b/sdk/dotnet/StorageCredential.cs @@ -153,6 +153,9 @@ public partial class StorageCredential : global::Pulumi.CustomResource [Output("databricksGcpServiceAccount")] public Output DatabricksGcpServiceAccount { get; private set; } = null!; + [Output("forceDestroy")] + public Output ForceDestroy { get; private set; } = null!; + [Output("gcpServiceAccountKey")] public Output GcpServiceAccountKey { get; private set; } = null!; @@ -240,6 +243,9 @@ public sealed class StorageCredentialArgs : global::Pulumi.ResourceArgs [Input("databricksGcpServiceAccount")] public Input? DatabricksGcpServiceAccount { get; set; } + [Input("forceDestroy")] + public Input? ForceDestroy { get; set; } + [Input("gcpServiceAccountKey")] public Input? GcpServiceAccountKey { get; set; } @@ -289,6 +295,9 @@ public sealed class StorageCredentialState : global::Pulumi.ResourceArgs [Input("databricksGcpServiceAccount")] public Input? DatabricksGcpServiceAccount { get; set; } + [Input("forceDestroy")] + public Input? ForceDestroy { get; set; } + [Input("gcpServiceAccountKey")] public Input? GcpServiceAccountKey { get; set; } diff --git a/sdk/go/databricks/accessControlRuleSet.go b/sdk/go/databricks/accessControlRuleSet.go index 95e942f9..356849d6 100644 --- a/sdk/go/databricks/accessControlRuleSet.go +++ b/sdk/go/databricks/accessControlRuleSet.go @@ -13,10 +13,293 @@ import ( // This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. // -// > **Note** Currently, we only support managing access rules on service principal resources through `AccessControlRuleSet`. +// > **Note** Currently, we only support managing access rules on service principal, group and account resources through `AccessControlRuleSet`. // // > **Warning** `AccessControlRuleSet` cannot be used to manage access rules for resources supported by databricks_permissions. Refer to its documentation for more information. 
// +// ## Service principal rule set usage +// +// Through a Databricks workspace: +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-databricks/sdk/go/databricks" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _ := "00000000-0000-0000-0000-000000000000" +// ds, err := databricks.LookupGroup(ctx, &databricks.LookupGroupArgs{ +// DisplayName: "Data Science", +// }, nil) +// if err != nil { +// return err +// } +// _, err = databricks.NewServicePrincipal(ctx, "automationSp", &databricks.ServicePrincipalArgs{ +// DisplayName: pulumi.String("SP_FOR_AUTOMATION"), +// }) +// if err != nil { +// return err +// } +// _, err = databricks.NewAccessControlRuleSet(ctx, "automationSpRuleSet", &databricks.AccessControlRuleSetArgs{ +// GrantRules: databricks.AccessControlRuleSetGrantRuleArray{ +// &databricks.AccessControlRuleSetGrantRuleArgs{ +// Principals: pulumi.StringArray{ +// *pulumi.String(ds.AclPrincipalId), +// }, +// Role: pulumi.String("roles/servicePrincipal.user"), +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// Through AWS Databricks account: +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-databricks/sdk/go/databricks" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _ := "00000000-0000-0000-0000-000000000000" +// ds, err := databricks.NewGroup(ctx, "ds", nil) +// if err != nil { +// return err +// } +// _, err = databricks.NewServicePrincipal(ctx, "automationSp", &databricks.ServicePrincipalArgs{ +// DisplayName: pulumi.String("SP_FOR_AUTOMATION"), +// }) +// if err != nil { +// return err +// } +// _, err = databricks.NewAccessControlRuleSet(ctx, "automationSpRuleSet", &databricks.AccessControlRuleSetArgs{ +// GrantRules: databricks.AccessControlRuleSetGrantRuleArray{ +// &databricks.AccessControlRuleSetGrantRuleArgs{ +// Principals: pulumi.StringArray{ +// ds.AclPrincipalId, +// }, +// Role: pulumi.String("roles/servicePrincipal.user"), +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// Through Azure Databricks account: +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-databricks/sdk/go/databricks" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _ := "00000000-0000-0000-0000-000000000000" +// ds, err := databricks.NewGroup(ctx, "ds", nil) +// if err != nil { +// return err +// } +// _, err = databricks.NewServicePrincipal(ctx, "automationSp", &databricks.ServicePrincipalArgs{ +// ApplicationId: pulumi.String("00000000-0000-0000-0000-000000000000"), +// DisplayName: pulumi.String("SP_FOR_AUTOMATION"), +// }) +// if err != nil { +// return err +// } +// _, err = databricks.NewAccessControlRuleSet(ctx, "automationSpRuleSet", &databricks.AccessControlRuleSetArgs{ +// GrantRules: databricks.AccessControlRuleSetGrantRuleArray{ +// &databricks.AccessControlRuleSetGrantRuleArgs{ +// Principals: pulumi.StringArray{ +// ds.AclPrincipalId, +// }, +// Role: pulumi.String("roles/servicePrincipal.user"), +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// Through GCP Databricks account: +// +// ```go +// package main +// +// import ( +// +// 
"github.com/pulumi/pulumi-databricks/sdk/go/databricks" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _ := "00000000-0000-0000-0000-000000000000" +// ds, err := databricks.NewGroup(ctx, "ds", nil) +// if err != nil { +// return err +// } +// _, err = databricks.NewServicePrincipal(ctx, "automationSp", &databricks.ServicePrincipalArgs{ +// DisplayName: pulumi.String("SP_FOR_AUTOMATION"), +// }) +// if err != nil { +// return err +// } +// _, err = databricks.NewAccessControlRuleSet(ctx, "automationSpRuleSet", &databricks.AccessControlRuleSetArgs{ +// GrantRules: databricks.AccessControlRuleSetGrantRuleArray{ +// &databricks.AccessControlRuleSetGrantRuleArgs{ +// Principals: pulumi.StringArray{ +// ds.AclPrincipalId, +// }, +// Role: pulumi.String("roles/servicePrincipal.user"), +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// ## Group rule set usage +// +// Refer to the appropriate provider configuration as shown in the examples for service principal rule set. +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-databricks/sdk/go/databricks" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _ := "00000000-0000-0000-0000-000000000000" +// _, err := databricks.LookupGroup(ctx, &databricks.LookupGroupArgs{ +// DisplayName: "Data Science", +// }, nil) +// if err != nil { +// return err +// } +// john, err := databricks.LookupUser(ctx, &databricks.LookupUserArgs{ +// UserName: pulumi.StringRef("john.doe@example.com"), +// }, nil) +// if err != nil { +// return err +// } +// _, err = databricks.NewAccessControlRuleSet(ctx, "dsGroupRuleSet", &databricks.AccessControlRuleSetArgs{ +// GrantRules: databricks.AccessControlRuleSetGrantRuleArray{ +// &databricks.AccessControlRuleSetGrantRuleArgs{ +// Principals: pulumi.StringArray{ +// *pulumi.String(john.AclPrincipalId), +// }, +// Role: pulumi.String("roles/group.manager"), +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// ## Account rule set usage +// +// Refer to the appropriate provider configuration as shown in the examples for service principal rule set. 
+// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-databricks/sdk/go/databricks" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _ := "00000000-0000-0000-0000-000000000000" +// _, err := databricks.LookupGroup(ctx, &databricks.LookupGroupArgs{ +// DisplayName: "Data Science", +// }, nil) +// if err != nil { +// return err +// } +// john, err := databricks.LookupUser(ctx, &databricks.LookupUserArgs{ +// UserName: pulumi.StringRef("john.doe@example.com"), +// }, nil) +// if err != nil { +// return err +// } +// _, err = databricks.NewAccessControlRuleSet(ctx, "accountRuleSet", &databricks.AccessControlRuleSetArgs{ +// GrantRules: databricks.AccessControlRuleSetGrantRuleArray{ +// &databricks.AccessControlRuleSetGrantRuleArgs{ +// Principals: pulumi.StringArray{ +// *pulumi.String(john.AclPrincipalId), +// }, +// Role: pulumi.String("roles/group.manager"), +// }, +// &databricks.AccessControlRuleSetGrantRuleArgs{ +// Principals: pulumi.StringArray{ +// data.Databricks_user.Ds.Acl_principal_id, +// }, +// Role: pulumi.String("roles/servicePrincipal.manager"), +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// // ## Related Resources // // The following resources are often used in the same context: @@ -34,6 +317,8 @@ type AccessControlRuleSet struct { GrantRules AccessControlRuleSetGrantRuleArrayOutput `pulumi:"grantRules"` // Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: // * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + // * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + // * `accounts/{account_id}/ruleSets/default` Name pulumi.StringOutput `pulumi:"name"` } @@ -74,6 +359,8 @@ type accessControlRuleSetState struct { GrantRules []AccessControlRuleSetGrantRule `pulumi:"grantRules"` // Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: // * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + // * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + // * `accounts/{account_id}/ruleSets/default` Name *string `pulumi:"name"` } @@ -85,6 +372,8 @@ type AccessControlRuleSetState struct { GrantRules AccessControlRuleSetGrantRuleArrayInput // Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: // * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + // * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + // * `accounts/{account_id}/ruleSets/default` Name pulumi.StringPtrInput } @@ -99,6 +388,8 @@ type accessControlRuleSetArgs struct { GrantRules []AccessControlRuleSetGrantRule `pulumi:"grantRules"` // Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. 
The following rule set formats are supported: // * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + // * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + // * `accounts/{account_id}/ruleSets/default` Name *string `pulumi:"name"` } @@ -110,6 +401,8 @@ type AccessControlRuleSetArgs struct { GrantRules AccessControlRuleSetGrantRuleArrayInput // Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: // * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + // * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + // * `accounts/{account_id}/ruleSets/default` Name pulumi.StringPtrInput } @@ -213,6 +506,8 @@ func (o AccessControlRuleSetOutput) GrantRules() AccessControlRuleSetGrantRuleAr // Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: // * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` +// * `accounts/{account_id}/groups/{group_id}/ruleSets/default` +// * `accounts/{account_id}/ruleSets/default` func (o AccessControlRuleSetOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v *AccessControlRuleSet) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) } diff --git a/sdk/go/databricks/catalog.go b/sdk/go/databricks/catalog.go index a8935294..3301fed5 100644 --- a/sdk/go/databricks/catalog.go +++ b/sdk/go/databricks/catalog.go @@ -62,6 +62,8 @@ type Catalog struct { // User-supplied free-form text. Comment pulumi.StringPtrOutput `pulumi:"comment"` + // For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + ConnectionName pulumi.StringPtrOutput `pulumi:"connectionName"` // Delete catalog regardless of its contents. ForceDestroy pulumi.BoolPtrOutput `pulumi:"forceDestroy"` // Whether the catalog is accessible from all workspaces or a specific set of workspaces. Can be `ISOLATED` or `OPEN`. Setting the catalog to `ISOLATED` will automatically allow access from the current workspace. @@ -113,6 +115,8 @@ func GetCatalog(ctx *pulumi.Context, type catalogState struct { // User-supplied free-form text. Comment *string `pulumi:"comment"` + // For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + ConnectionName *string `pulumi:"connectionName"` // Delete catalog regardless of its contents. ForceDestroy *bool `pulumi:"forceDestroy"` // Whether the catalog is accessible from all workspaces or a specific set of workspaces. Can be `ISOLATED` or `OPEN`. Setting the catalog to `ISOLATED` will automatically allow access from the current workspace. @@ -135,6 +139,8 @@ type catalogState struct { type CatalogState struct { // User-supplied free-form text. Comment pulumi.StringPtrInput + // For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + ConnectionName pulumi.StringPtrInput // Delete catalog regardless of its contents. ForceDestroy pulumi.BoolPtrInput // Whether the catalog is accessible from all workspaces or a specific set of workspaces. Can be `ISOLATED` or `OPEN`. Setting the catalog to `ISOLATED` will automatically allow access from the current workspace. 
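Editor's note: the `connectionName` field added to `Catalog` above is what ties a catalog to a Unity Catalog connection for Lakehouse Federation (the new `Connection` resource generated further below). The following is a minimal Go sketch of how the two resources might be wired together; the MySQL host, port and credentials are placeholders modelled on the generated `Connection` example, and depending on the connection type the foreign catalog may need additional options that are omitted here.

```go
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// A Unity Catalog connection to an external MySQL database.
		// Host and credentials below are placeholders, not real values.
		mysql, err := databricks.NewConnection(ctx, "mysql", &databricks.ConnectionArgs{
			ConnectionType: pulumi.String("MYSQL"),
			Comment:        pulumi.String("connection used by a foreign catalog"),
			Options: pulumi.Map{
				"host":     pulumi.String("test.mysql.database.azure.com"),
				"port":     pulumi.String("3306"),
				"user":     pulumi.String("user"),
				"password": pulumi.String("password"),
			},
		})
		if err != nil {
			return err
		}
		// A foreign catalog that surfaces the connection's data through
		// Unity Catalog; ConnectionName is the new field documented above.
		_, err = databricks.NewCatalog(ctx, "foreign", &databricks.CatalogArgs{
			ConnectionName: mysql.Name,
			Comment:        pulumi.String("foreign catalog backed by the mysql connection"),
		})
		return err
	})
}
```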
@@ -161,6 +167,8 @@ func (CatalogState) ElementType() reflect.Type { type catalogArgs struct { // User-supplied free-form text. Comment *string `pulumi:"comment"` + // For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + ConnectionName *string `pulumi:"connectionName"` // Delete catalog regardless of its contents. ForceDestroy *bool `pulumi:"forceDestroy"` // Whether the catalog is accessible from all workspaces or a specific set of workspaces. Can be `ISOLATED` or `OPEN`. Setting the catalog to `ISOLATED` will automatically allow access from the current workspace. @@ -184,6 +192,8 @@ type catalogArgs struct { type CatalogArgs struct { // User-supplied free-form text. Comment pulumi.StringPtrInput + // For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + ConnectionName pulumi.StringPtrInput // Delete catalog regardless of its contents. ForceDestroy pulumi.BoolPtrInput // Whether the catalog is accessible from all workspaces or a specific set of workspaces. Can be `ISOLATED` or `OPEN`. Setting the catalog to `ISOLATED` will automatically allow access from the current workspace. @@ -295,6 +305,11 @@ func (o CatalogOutput) Comment() pulumi.StringPtrOutput { return o.ApplyT(func(v *Catalog) pulumi.StringPtrOutput { return v.Comment }).(pulumi.StringPtrOutput) } +// For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. +func (o CatalogOutput) ConnectionName() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Catalog) pulumi.StringPtrOutput { return v.ConnectionName }).(pulumi.StringPtrOutput) +} + // Delete catalog regardless of its contents. func (o CatalogOutput) ForceDestroy() pulumi.BoolPtrOutput { return o.ApplyT(func(v *Catalog) pulumi.BoolPtrOutput { return v.ForceDestroy }).(pulumi.BoolPtrOutput) diff --git a/sdk/go/databricks/connection.go b/sdk/go/databricks/connection.go new file mode 100644 index 00000000..593ab25e --- /dev/null +++ b/sdk/go/databricks/connection.go @@ -0,0 +1,373 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package databricks + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-databricks/sdk/go/databricks/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: +// +// - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. 
+// - A foreign catalog +// +// # This resource manages connections in Unity Catalog +// +// ## Example Usage +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-databricks/sdk/go/databricks" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := databricks.NewConnection(ctx, "mysql", &databricks.ConnectionArgs{ +// Comment: pulumi.String("this is a connection to mysql db"), +// ConnectionType: pulumi.String("MYSQL"), +// Options: pulumi.AnyMap{ +// "host": pulumi.Any("test.mysql.database.azure.com"), +// "password": pulumi.Any("password"), +// "port": pulumi.Any("3306"), +// "user": pulumi.Any("user"), +// }, +// Properties: pulumi.AnyMap{ +// "purpose": pulumi.Any("testing"), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// ## Import +// +// # This resource can be imported by `name` bash +// +// ```sh +// +// $ pulumi import databricks:index/connection:Connection this +// +// ``` +type Connection struct { + pulumi.CustomResourceState + + // Free-form text. + Comment pulumi.StringPtrOutput `pulumi:"comment"` + // Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + ConnectionType pulumi.StringOutput `pulumi:"connectionType"` + MetastoreId pulumi.StringOutput `pulumi:"metastoreId"` + // Name of the Connection. + Name pulumi.StringOutput `pulumi:"name"` + // The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + Options pulumi.MapOutput `pulumi:"options"` + // Name of the connection owner. + Owner pulumi.StringPtrOutput `pulumi:"owner"` + // Free-form connection properties. + Properties pulumi.MapOutput `pulumi:"properties"` + ReadOnly pulumi.BoolOutput `pulumi:"readOnly"` +} + +// NewConnection registers a new resource with the given unique name, arguments, and options. +func NewConnection(ctx *pulumi.Context, + name string, args *ConnectionArgs, opts ...pulumi.ResourceOption) (*Connection, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.ConnectionType == nil { + return nil, errors.New("invalid value for required argument 'ConnectionType'") + } + if args.Options == nil { + return nil, errors.New("invalid value for required argument 'Options'") + } + if args.Options != nil { + args.Options = pulumi.ToSecret(args.Options).(pulumi.MapInput) + } + secrets := pulumi.AdditionalSecretOutputs([]string{ + "options", + }) + opts = append(opts, secrets) + opts = internal.PkgResourceDefaultOpts(opts) + var resource Connection + err := ctx.RegisterResource("databricks:index/connection:Connection", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetConnection gets an existing Connection resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetConnection(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *ConnectionState, opts ...pulumi.ResourceOption) (*Connection, error) { + var resource Connection + err := ctx.ReadResource("databricks:index/connection:Connection", name, id, state, &resource, opts...) 
+ if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering Connection resources. +type connectionState struct { + // Free-form text. + Comment *string `pulumi:"comment"` + // Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + ConnectionType *string `pulumi:"connectionType"` + MetastoreId *string `pulumi:"metastoreId"` + // Name of the Connection. + Name *string `pulumi:"name"` + // The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + Options map[string]interface{} `pulumi:"options"` + // Name of the connection owner. + Owner *string `pulumi:"owner"` + // Free-form connection properties. + Properties map[string]interface{} `pulumi:"properties"` + ReadOnly *bool `pulumi:"readOnly"` +} + +type ConnectionState struct { + // Free-form text. + Comment pulumi.StringPtrInput + // Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + ConnectionType pulumi.StringPtrInput + MetastoreId pulumi.StringPtrInput + // Name of the Connection. + Name pulumi.StringPtrInput + // The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + Options pulumi.MapInput + // Name of the connection owner. + Owner pulumi.StringPtrInput + // Free-form connection properties. + Properties pulumi.MapInput + ReadOnly pulumi.BoolPtrInput +} + +func (ConnectionState) ElementType() reflect.Type { + return reflect.TypeOf((*connectionState)(nil)).Elem() +} + +type connectionArgs struct { + // Free-form text. + Comment *string `pulumi:"comment"` + // Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + ConnectionType string `pulumi:"connectionType"` + MetastoreId *string `pulumi:"metastoreId"` + // Name of the Connection. + Name *string `pulumi:"name"` + // The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + Options map[string]interface{} `pulumi:"options"` + // Name of the connection owner. + Owner *string `pulumi:"owner"` + // Free-form connection properties. + Properties map[string]interface{} `pulumi:"properties"` + ReadOnly *bool `pulumi:"readOnly"` +} + +// The set of arguments for constructing a Connection resource. +type ConnectionArgs struct { + // Free-form text. + Comment pulumi.StringPtrInput + // Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + ConnectionType pulumi.StringInput + MetastoreId pulumi.StringPtrInput + // Name of the Connection. + Name pulumi.StringPtrInput + // The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + Options pulumi.MapInput + // Name of the connection owner. + Owner pulumi.StringPtrInput + // Free-form connection properties. 
+ Properties pulumi.MapInput + ReadOnly pulumi.BoolPtrInput +} + +func (ConnectionArgs) ElementType() reflect.Type { + return reflect.TypeOf((*connectionArgs)(nil)).Elem() +} + +type ConnectionInput interface { + pulumi.Input + + ToConnectionOutput() ConnectionOutput + ToConnectionOutputWithContext(ctx context.Context) ConnectionOutput +} + +func (*Connection) ElementType() reflect.Type { + return reflect.TypeOf((**Connection)(nil)).Elem() +} + +func (i *Connection) ToConnectionOutput() ConnectionOutput { + return i.ToConnectionOutputWithContext(context.Background()) +} + +func (i *Connection) ToConnectionOutputWithContext(ctx context.Context) ConnectionOutput { + return pulumi.ToOutputWithContext(ctx, i).(ConnectionOutput) +} + +// ConnectionArrayInput is an input type that accepts ConnectionArray and ConnectionArrayOutput values. +// You can construct a concrete instance of `ConnectionArrayInput` via: +// +// ConnectionArray{ ConnectionArgs{...} } +type ConnectionArrayInput interface { + pulumi.Input + + ToConnectionArrayOutput() ConnectionArrayOutput + ToConnectionArrayOutputWithContext(context.Context) ConnectionArrayOutput +} + +type ConnectionArray []ConnectionInput + +func (ConnectionArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]*Connection)(nil)).Elem() +} + +func (i ConnectionArray) ToConnectionArrayOutput() ConnectionArrayOutput { + return i.ToConnectionArrayOutputWithContext(context.Background()) +} + +func (i ConnectionArray) ToConnectionArrayOutputWithContext(ctx context.Context) ConnectionArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(ConnectionArrayOutput) +} + +// ConnectionMapInput is an input type that accepts ConnectionMap and ConnectionMapOutput values. +// You can construct a concrete instance of `ConnectionMapInput` via: +// +// ConnectionMap{ "key": ConnectionArgs{...} } +type ConnectionMapInput interface { + pulumi.Input + + ToConnectionMapOutput() ConnectionMapOutput + ToConnectionMapOutputWithContext(context.Context) ConnectionMapOutput +} + +type ConnectionMap map[string]ConnectionInput + +func (ConnectionMap) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*Connection)(nil)).Elem() +} + +func (i ConnectionMap) ToConnectionMapOutput() ConnectionMapOutput { + return i.ToConnectionMapOutputWithContext(context.Background()) +} + +func (i ConnectionMap) ToConnectionMapOutputWithContext(ctx context.Context) ConnectionMapOutput { + return pulumi.ToOutputWithContext(ctx, i).(ConnectionMapOutput) +} + +type ConnectionOutput struct{ *pulumi.OutputState } + +func (ConnectionOutput) ElementType() reflect.Type { + return reflect.TypeOf((**Connection)(nil)).Elem() +} + +func (o ConnectionOutput) ToConnectionOutput() ConnectionOutput { + return o +} + +func (o ConnectionOutput) ToConnectionOutputWithContext(ctx context.Context) ConnectionOutput { + return o +} + +// Free-form text. +func (o ConnectionOutput) Comment() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Connection) pulumi.StringPtrOutput { return v.Comment }).(pulumi.StringPtrOutput) +} + +// Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. 
[Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) +func (o ConnectionOutput) ConnectionType() pulumi.StringOutput { + return o.ApplyT(func(v *Connection) pulumi.StringOutput { return v.ConnectionType }).(pulumi.StringOutput) +} + +func (o ConnectionOutput) MetastoreId() pulumi.StringOutput { + return o.ApplyT(func(v *Connection) pulumi.StringOutput { return v.MetastoreId }).(pulumi.StringOutput) +} + +// Name of the Connection. +func (o ConnectionOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v *Connection) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) +} + +// The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. +func (o ConnectionOutput) Options() pulumi.MapOutput { + return o.ApplyT(func(v *Connection) pulumi.MapOutput { return v.Options }).(pulumi.MapOutput) +} + +// Name of the connection owner. +func (o ConnectionOutput) Owner() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Connection) pulumi.StringPtrOutput { return v.Owner }).(pulumi.StringPtrOutput) +} + +// Free-form connection properties. +func (o ConnectionOutput) Properties() pulumi.MapOutput { + return o.ApplyT(func(v *Connection) pulumi.MapOutput { return v.Properties }).(pulumi.MapOutput) +} + +func (o ConnectionOutput) ReadOnly() pulumi.BoolOutput { + return o.ApplyT(func(v *Connection) pulumi.BoolOutput { return v.ReadOnly }).(pulumi.BoolOutput) +} + +type ConnectionArrayOutput struct{ *pulumi.OutputState } + +func (ConnectionArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]*Connection)(nil)).Elem() +} + +func (o ConnectionArrayOutput) ToConnectionArrayOutput() ConnectionArrayOutput { + return o +} + +func (o ConnectionArrayOutput) ToConnectionArrayOutputWithContext(ctx context.Context) ConnectionArrayOutput { + return o +} + +func (o ConnectionArrayOutput) Index(i pulumi.IntInput) ConnectionOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) *Connection { + return vs[0].([]*Connection)[vs[1].(int)] + }).(ConnectionOutput) +} + +type ConnectionMapOutput struct{ *pulumi.OutputState } + +func (ConnectionMapOutput) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*Connection)(nil)).Elem() +} + +func (o ConnectionMapOutput) ToConnectionMapOutput() ConnectionMapOutput { + return o +} + +func (o ConnectionMapOutput) ToConnectionMapOutputWithContext(ctx context.Context) ConnectionMapOutput { + return o +} + +func (o ConnectionMapOutput) MapIndex(k pulumi.StringInput) ConnectionOutput { + return pulumi.All(o, k).ApplyT(func(vs []interface{}) *Connection { + return vs[0].(map[string]*Connection)[vs[1].(string)] + }).(ConnectionOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*ConnectionInput)(nil)).Elem(), &Connection{}) + pulumi.RegisterInputType(reflect.TypeOf((*ConnectionArrayInput)(nil)).Elem(), ConnectionArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*ConnectionMapInput)(nil)).Elem(), ConnectionMap{}) + pulumi.RegisterOutputType(ConnectionOutput{}) + pulumi.RegisterOutputType(ConnectionArrayOutput{}) + pulumi.RegisterOutputType(ConnectionMapOutput{}) +} diff --git a/sdk/go/databricks/externalLocation.go b/sdk/go/databricks/externalLocation.go index 213ef01d..ddb0b6ef 100644 --- a/sdk/go/databricks/externalLocation.go +++ b/sdk/go/databricks/externalLocation.go @@ -29,16 +29,22 @@ import ( type ExternalLocation struct { pulumi.CustomResourceState + // The ARN of the s3 access point to use with the 
external location (AWS). + AccessPoint pulumi.StringPtrOutput `pulumi:"accessPoint"` // User-supplied free-form text. Comment pulumi.StringPtrOutput `pulumi:"comment"` - // Name of the StorageCredential to use with this External Location. + // Name of the StorageCredential to use with this external location. CredentialName pulumi.StringOutput `pulumi:"credentialName"` + // The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + EncryptionDetails ExternalLocationEncryptionDetailsPtrOutput `pulumi:"encryptionDetails"` // Destroy external location regardless of its dependents. ForceDestroy pulumi.BoolPtrOutput `pulumi:"forceDestroy"` - MetastoreId pulumi.StringOutput `pulumi:"metastoreId"` + // Update external location regardless of its dependents. + ForceUpdate pulumi.BoolPtrOutput `pulumi:"forceUpdate"` + MetastoreId pulumi.StringOutput `pulumi:"metastoreId"` // Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. Name pulumi.StringOutput `pulumi:"name"` - // Username/groupname/sp applicationId of the external Location owner. + // Username/groupname/sp applicationId of the external location owner. Owner pulumi.StringOutput `pulumi:"owner"` // Indicates whether the external location is read-only. ReadOnly pulumi.BoolPtrOutput `pulumi:"readOnly"` @@ -84,16 +90,22 @@ func GetExternalLocation(ctx *pulumi.Context, // Input properties used for looking up and filtering ExternalLocation resources. type externalLocationState struct { + // The ARN of the s3 access point to use with the external location (AWS). + AccessPoint *string `pulumi:"accessPoint"` // User-supplied free-form text. Comment *string `pulumi:"comment"` - // Name of the StorageCredential to use with this External Location. + // Name of the StorageCredential to use with this external location. CredentialName *string `pulumi:"credentialName"` + // The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + EncryptionDetails *ExternalLocationEncryptionDetails `pulumi:"encryptionDetails"` // Destroy external location regardless of its dependents. - ForceDestroy *bool `pulumi:"forceDestroy"` - MetastoreId *string `pulumi:"metastoreId"` + ForceDestroy *bool `pulumi:"forceDestroy"` + // Update external location regardless of its dependents. + ForceUpdate *bool `pulumi:"forceUpdate"` + MetastoreId *string `pulumi:"metastoreId"` // Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. Name *string `pulumi:"name"` - // Username/groupname/sp applicationId of the external Location owner. + // Username/groupname/sp applicationId of the external location owner. Owner *string `pulumi:"owner"` // Indicates whether the external location is read-only. ReadOnly *bool `pulumi:"readOnly"` @@ -104,16 +116,22 @@ type externalLocationState struct { } type ExternalLocationState struct { + // The ARN of the s3 access point to use with the external location (AWS). + AccessPoint pulumi.StringPtrInput // User-supplied free-form text. Comment pulumi.StringPtrInput - // Name of the StorageCredential to use with this External Location. + // Name of the StorageCredential to use with this external location. CredentialName pulumi.StringPtrInput + // The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). 
+ EncryptionDetails ExternalLocationEncryptionDetailsPtrInput // Destroy external location regardless of its dependents. ForceDestroy pulumi.BoolPtrInput - MetastoreId pulumi.StringPtrInput + // Update external location regardless of its dependents. + ForceUpdate pulumi.BoolPtrInput + MetastoreId pulumi.StringPtrInput // Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. Name pulumi.StringPtrInput - // Username/groupname/sp applicationId of the external Location owner. + // Username/groupname/sp applicationId of the external location owner. Owner pulumi.StringPtrInput // Indicates whether the external location is read-only. ReadOnly pulumi.BoolPtrInput @@ -128,16 +146,22 @@ func (ExternalLocationState) ElementType() reflect.Type { } type externalLocationArgs struct { + // The ARN of the s3 access point to use with the external location (AWS). + AccessPoint *string `pulumi:"accessPoint"` // User-supplied free-form text. Comment *string `pulumi:"comment"` - // Name of the StorageCredential to use with this External Location. + // Name of the StorageCredential to use with this external location. CredentialName string `pulumi:"credentialName"` + // The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + EncryptionDetails *ExternalLocationEncryptionDetails `pulumi:"encryptionDetails"` // Destroy external location regardless of its dependents. - ForceDestroy *bool `pulumi:"forceDestroy"` - MetastoreId *string `pulumi:"metastoreId"` + ForceDestroy *bool `pulumi:"forceDestroy"` + // Update external location regardless of its dependents. + ForceUpdate *bool `pulumi:"forceUpdate"` + MetastoreId *string `pulumi:"metastoreId"` // Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. Name *string `pulumi:"name"` - // Username/groupname/sp applicationId of the external Location owner. + // Username/groupname/sp applicationId of the external location owner. Owner *string `pulumi:"owner"` // Indicates whether the external location is read-only. ReadOnly *bool `pulumi:"readOnly"` @@ -149,16 +173,22 @@ type externalLocationArgs struct { // The set of arguments for constructing a ExternalLocation resource. type ExternalLocationArgs struct { + // The ARN of the s3 access point to use with the external location (AWS). + AccessPoint pulumi.StringPtrInput // User-supplied free-form text. Comment pulumi.StringPtrInput - // Name of the StorageCredential to use with this External Location. + // Name of the StorageCredential to use with this external location. CredentialName pulumi.StringInput + // The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + EncryptionDetails ExternalLocationEncryptionDetailsPtrInput // Destroy external location regardless of its dependents. ForceDestroy pulumi.BoolPtrInput - MetastoreId pulumi.StringPtrInput + // Update external location regardless of its dependents. + ForceUpdate pulumi.BoolPtrInput + MetastoreId pulumi.StringPtrInput // Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. Name pulumi.StringPtrInput - // Username/groupname/sp applicationId of the external Location owner. + // Username/groupname/sp applicationId of the external location owner. Owner pulumi.StringPtrInput // Indicates whether the external location is read-only. 
ReadOnly pulumi.BoolPtrInput @@ -255,21 +285,36 @@ func (o ExternalLocationOutput) ToExternalLocationOutputWithContext(ctx context. return o } +// The ARN of the s3 access point to use with the external location (AWS). +func (o ExternalLocationOutput) AccessPoint() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExternalLocation) pulumi.StringPtrOutput { return v.AccessPoint }).(pulumi.StringPtrOutput) +} + // User-supplied free-form text. func (o ExternalLocationOutput) Comment() pulumi.StringPtrOutput { return o.ApplyT(func(v *ExternalLocation) pulumi.StringPtrOutput { return v.Comment }).(pulumi.StringPtrOutput) } -// Name of the StorageCredential to use with this External Location. +// Name of the StorageCredential to use with this external location. func (o ExternalLocationOutput) CredentialName() pulumi.StringOutput { return o.ApplyT(func(v *ExternalLocation) pulumi.StringOutput { return v.CredentialName }).(pulumi.StringOutput) } +// The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). +func (o ExternalLocationOutput) EncryptionDetails() ExternalLocationEncryptionDetailsPtrOutput { + return o.ApplyT(func(v *ExternalLocation) ExternalLocationEncryptionDetailsPtrOutput { return v.EncryptionDetails }).(ExternalLocationEncryptionDetailsPtrOutput) +} + // Destroy external location regardless of its dependents. func (o ExternalLocationOutput) ForceDestroy() pulumi.BoolPtrOutput { return o.ApplyT(func(v *ExternalLocation) pulumi.BoolPtrOutput { return v.ForceDestroy }).(pulumi.BoolPtrOutput) } +// Update external location regardless of its dependents. +func (o ExternalLocationOutput) ForceUpdate() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *ExternalLocation) pulumi.BoolPtrOutput { return v.ForceUpdate }).(pulumi.BoolPtrOutput) +} + func (o ExternalLocationOutput) MetastoreId() pulumi.StringOutput { return o.ApplyT(func(v *ExternalLocation) pulumi.StringOutput { return v.MetastoreId }).(pulumi.StringOutput) } @@ -279,7 +324,7 @@ func (o ExternalLocationOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v *ExternalLocation) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) } -// Username/groupname/sp applicationId of the external Location owner. +// Username/groupname/sp applicationId of the external location owner. func (o ExternalLocationOutput) Owner() pulumi.StringOutput { return o.ApplyT(func(v *ExternalLocation) pulumi.StringOutput { return v.Owner }).(pulumi.StringOutput) } diff --git a/sdk/go/databricks/getCurrentUser.go b/sdk/go/databricks/getCurrentUser.go index 611ec035..3726baa9 100644 --- a/sdk/go/databricks/getCurrentUser.go +++ b/sdk/go/databricks/getCurrentUser.go @@ -19,6 +19,7 @@ import ( // * `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`. // * `alphanumeric` - Alphanumeric representation of user local name. e.g. `mrFoo`. // * `workspaceUrl` - URL of the current Databricks workspace. +// * `aclPrincipalId` - identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal. // // ## Related Resources // @@ -40,9 +41,10 @@ func GetCurrentUser(ctx *pulumi.Context, opts ...pulumi.InvokeOption) (*GetCurre // A collection of values returned by getCurrentUser. 
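Taken together, the externalLocation.go changes above add an `accessPoint` ARN, a `forceUpdate` flag, and an `encryptionDetails` block wrapping SSE settings. A minimal sketch of the new surface, assuming the resource's usual required `Url` argument and a pre-existing storage credential (neither appears in this hunk), and using an illustrative algorithm value:

```go
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// External location that uses the new S3 access point and SSE-KMS settings.
		_, err := databricks.NewExternalLocation(ctx, "this", &databricks.ExternalLocationArgs{
			Name:           pulumi.String("external"),
			Url:            pulumi.String("s3://my-bucket/some/path"), // assumed required field, not shown in this hunk
			CredentialName: pulumi.String("my-storage-credential"),
			AccessPoint:    pulumi.String("arn:aws:s3:us-east-1:123456789012:accesspoint/my-ap"),
			// ForceUpdate lets the location be re-pointed even when dependents exist.
			ForceUpdate: pulumi.Bool(true),
			EncryptionDetails: &databricks.ExternalLocationEncryptionDetailsArgs{
				SseEncryptionDetails: &databricks.ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs{
					Algorithm:    pulumi.String("AWS_SSE_KMS"), // illustrative value
					AwsKmsKeyArn: pulumi.String("arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```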
type GetCurrentUserResult struct { - Alphanumeric string `pulumi:"alphanumeric"` - ExternalId string `pulumi:"externalId"` - Home string `pulumi:"home"` + AclPrincipalId string `pulumi:"aclPrincipalId"` + Alphanumeric string `pulumi:"alphanumeric"` + ExternalId string `pulumi:"externalId"` + Home string `pulumi:"home"` // The provider-assigned unique ID for this managed resource. Id string `pulumi:"id"` Repos string `pulumi:"repos"` diff --git a/sdk/go/databricks/getGroup.go b/sdk/go/databricks/getGroup.go index f6b52f58..aa9b6189 100644 --- a/sdk/go/databricks/getGroup.go +++ b/sdk/go/databricks/getGroup.go @@ -77,6 +77,8 @@ func LookupGroup(ctx *pulumi.Context, args *LookupGroupArgs, opts ...pulumi.Invo // A collection of arguments for invoking getGroup. type LookupGroupArgs struct { + // identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + AclPrincipalId *string `pulumi:"aclPrincipalId"` // True if group members can create clusters AllowClusterCreate *bool `pulumi:"allowClusterCreate"` // True if group members can create instance pools @@ -105,6 +107,8 @@ type LookupGroupArgs struct { // A collection of values returned by getGroup. type LookupGroupResult struct { + // identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + AclPrincipalId string `pulumi:"aclPrincipalId"` // True if group members can create clusters AllowClusterCreate *bool `pulumi:"allowClusterCreate"` // True if group members can create instance pools @@ -146,6 +150,8 @@ func LookupGroupOutput(ctx *pulumi.Context, args LookupGroupOutputArgs, opts ... // A collection of arguments for invoking getGroup. type LookupGroupOutputArgs struct { + // identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + AclPrincipalId pulumi.StringPtrInput `pulumi:"aclPrincipalId"` // True if group members can create clusters AllowClusterCreate pulumi.BoolPtrInput `pulumi:"allowClusterCreate"` // True if group members can create instance pools @@ -191,6 +197,11 @@ func (o LookupGroupResultOutput) ToLookupGroupResultOutputWithContext(ctx contex return o } +// identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. +func (o LookupGroupResultOutput) AclPrincipalId() pulumi.StringOutput { + return o.ApplyT(func(v LookupGroupResult) string { return v.AclPrincipalId }).(pulumi.StringOutput) +} + // True if group members can create clusters func (o LookupGroupResultOutput) AllowClusterCreate() pulumi.BoolPtrOutput { return o.ApplyT(func(v LookupGroupResult) *bool { return v.AllowClusterCreate }).(pulumi.BoolPtrOutput) diff --git a/sdk/go/databricks/getServicePrincipal.go b/sdk/go/databricks/getServicePrincipal.go index ce9d893d..af57ec18 100644 --- a/sdk/go/databricks/getServicePrincipal.go +++ b/sdk/go/databricks/getServicePrincipal.go @@ -79,6 +79,8 @@ func LookupServicePrincipal(ctx *pulumi.Context, args *LookupServicePrincipalArg // A collection of arguments for invoking getServicePrincipal. type LookupServicePrincipalArgs struct { + // identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + AclPrincipalId *string `pulumi:"aclPrincipalId"` // Whether service principal is active or not. Active *bool `pulumi:"active"` // ID of the service principal. The service principal must exist before this resource can be retrieved. @@ -98,6 +100,8 @@ type LookupServicePrincipalArgs struct { // A collection of values returned by getServicePrincipal. 
type LookupServicePrincipalResult struct { + // identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + AclPrincipalId string `pulumi:"aclPrincipalId"` // Whether service principal is active or not. Active bool `pulumi:"active"` ApplicationId string `pulumi:"applicationId"` @@ -129,6 +133,8 @@ func LookupServicePrincipalOutput(ctx *pulumi.Context, args LookupServicePrincip // A collection of arguments for invoking getServicePrincipal. type LookupServicePrincipalOutputArgs struct { + // identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + AclPrincipalId pulumi.StringPtrInput `pulumi:"aclPrincipalId"` // Whether service principal is active or not. Active pulumi.BoolPtrInput `pulumi:"active"` // ID of the service principal. The service principal must exist before this resource can be retrieved. @@ -165,6 +171,11 @@ func (o LookupServicePrincipalResultOutput) ToLookupServicePrincipalResultOutput return o } +// identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. +func (o LookupServicePrincipalResultOutput) AclPrincipalId() pulumi.StringOutput { + return o.ApplyT(func(v LookupServicePrincipalResult) string { return v.AclPrincipalId }).(pulumi.StringOutput) +} + // Whether service principal is active or not. func (o LookupServicePrincipalResultOutput) Active() pulumi.BoolOutput { return o.ApplyT(func(v LookupServicePrincipalResult) bool { return v.Active }).(pulumi.BoolOutput) diff --git a/sdk/go/databricks/getUser.go b/sdk/go/databricks/getUser.go index 9b510b13..60cda07f 100644 --- a/sdk/go/databricks/getUser.go +++ b/sdk/go/databricks/getUser.go @@ -88,6 +88,8 @@ type LookupUserArgs struct { // A collection of values returned by getUser. type LookupUserResult struct { + // identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com`. + AclPrincipalId string `pulumi:"aclPrincipalId"` // Alphanumeric representation of user local name. e.g. `mrFoo`. Alphanumeric string `pulumi:"alphanumeric"` ApplicationId string `pulumi:"applicationId"` @@ -146,6 +148,11 @@ func (o LookupUserResultOutput) ToLookupUserResultOutputWithContext(ctx context. return o } +// identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com`. +func (o LookupUserResultOutput) AclPrincipalId() pulumi.StringOutput { + return o.ApplyT(func(v LookupUserResult) string { return v.AclPrincipalId }).(pulumi.StringOutput) +} + // Alphanumeric representation of user local name. e.g. `mrFoo`. 
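The `aclPrincipalId` attribute added to getCurrentUser, getGroup, getServicePrincipal, and getUser above exists so these lookups can feed `databricks_access_control_rule_set` grant rules directly. A hedged sketch, assuming the group data source's usual `DisplayName` argument and the rule set resource's `Name`/`GrantRules` arguments (defined elsewhere in this patch, not in this hunk); the account and application IDs, and the rule-set name format, are placeholders:

```go
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Look up an account group; AclPrincipalId comes back as `groups/Some Group`.
		group, err := databricks.LookupGroup(ctx, &databricks.LookupGroupArgs{
			DisplayName: "Data Engineers", // assumed argument name
		})
		if err != nil {
			return err
		}
		// Current caller, e.g. `servicePrincipals/...` when run under a service principal.
		me, err := databricks.GetCurrentUser(ctx, nil)
		if err != nil {
			return err
		}
		accountID := "00000000-0000-0000-0000-000000000000"       // placeholder account ID
		automationSpID := "11111111-1111-1111-1111-111111111111"  // placeholder application ID
		_, err = databricks.NewAccessControlRuleSet(ctx, "automationSpRuleSet", &databricks.AccessControlRuleSetArgs{
			// Rule-set name format follows the account access-control docs.
			Name: pulumi.String(fmt.Sprintf("accounts/%s/servicePrincipals/%s/ruleSets/default", accountID, automationSpID)),
			GrantRules: databricks.AccessControlRuleSetGrantRuleArray{
				&databricks.AccessControlRuleSetGrantRuleArgs{
					Principals: pulumi.StringArray{
						pulumi.String(group.AclPrincipalId),
						pulumi.String(me.AclPrincipalId),
					},
					Role: pulumi.String("roles/servicePrincipal.user"),
				},
			},
		})
		return err
	})
}
```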
func (o LookupUserResultOutput) Alphanumeric() pulumi.StringOutput { return o.ApplyT(func(v LookupUserResult) string { return v.Alphanumeric }).(pulumi.StringOutput) diff --git a/sdk/go/databricks/grants.go b/sdk/go/databricks/grants.go index 0829a998..57a2ba75 100644 --- a/sdk/go/databricks/grants.go +++ b/sdk/go/databricks/grants.go @@ -17,6 +17,7 @@ type Grants struct { Catalog pulumi.StringPtrOutput `pulumi:"catalog"` ExternalLocation pulumi.StringPtrOutput `pulumi:"externalLocation"` + ForeignConnection pulumi.StringPtrOutput `pulumi:"foreignConnection"` Function pulumi.StringPtrOutput `pulumi:"function"` Grants GrantsGrantArrayOutput `pulumi:"grants"` MaterializedView pulumi.StringPtrOutput `pulumi:"materializedView"` @@ -64,6 +65,7 @@ func GetGrants(ctx *pulumi.Context, type grantsState struct { Catalog *string `pulumi:"catalog"` ExternalLocation *string `pulumi:"externalLocation"` + ForeignConnection *string `pulumi:"foreignConnection"` Function *string `pulumi:"function"` Grants []GrantsGrant `pulumi:"grants"` MaterializedView *string `pulumi:"materializedView"` @@ -79,6 +81,7 @@ type grantsState struct { type GrantsState struct { Catalog pulumi.StringPtrInput ExternalLocation pulumi.StringPtrInput + ForeignConnection pulumi.StringPtrInput Function pulumi.StringPtrInput Grants GrantsGrantArrayInput MaterializedView pulumi.StringPtrInput @@ -98,6 +101,7 @@ func (GrantsState) ElementType() reflect.Type { type grantsArgs struct { Catalog *string `pulumi:"catalog"` ExternalLocation *string `pulumi:"externalLocation"` + ForeignConnection *string `pulumi:"foreignConnection"` Function *string `pulumi:"function"` Grants []GrantsGrant `pulumi:"grants"` MaterializedView *string `pulumi:"materializedView"` @@ -114,6 +118,7 @@ type grantsArgs struct { type GrantsArgs struct { Catalog pulumi.StringPtrInput ExternalLocation pulumi.StringPtrInput + ForeignConnection pulumi.StringPtrInput Function pulumi.StringPtrInput Grants GrantsGrantArrayInput MaterializedView pulumi.StringPtrInput @@ -221,6 +226,10 @@ func (o GrantsOutput) ExternalLocation() pulumi.StringPtrOutput { return o.ApplyT(func(v *Grants) pulumi.StringPtrOutput { return v.ExternalLocation }).(pulumi.StringPtrOutput) } +func (o GrantsOutput) ForeignConnection() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Grants) pulumi.StringPtrOutput { return v.ForeignConnection }).(pulumi.StringPtrOutput) +} + func (o GrantsOutput) Function() pulumi.StringPtrOutput { return o.ApplyT(func(v *Grants) pulumi.StringPtrOutput { return v.Function }).(pulumi.StringPtrOutput) } diff --git a/sdk/go/databricks/init.go b/sdk/go/databricks/init.go index 9718b0f5..eb3afab8 100644 --- a/sdk/go/databricks/init.go +++ b/sdk/go/databricks/init.go @@ -31,6 +31,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi r = &Cluster{} case "databricks:index/clusterPolicy:ClusterPolicy": r = &ClusterPolicy{} + case "databricks:index/connection:Connection": + r = &Connection{} case "databricks:index/dbfsFile:DbfsFile": r = &DbfsFile{} case "databricks:index/directory:Directory": @@ -221,6 +223,11 @@ func init() { "index/clusterPolicy", &module{version}, ) + pulumi.RegisterResourceModule( + "databricks", + "index/connection", + &module{version}, + ) pulumi.RegisterResourceModule( "databricks", "index/dbfsFile", diff --git a/sdk/go/databricks/metastore.go b/sdk/go/databricks/metastore.go index f28943aa..f1ce9d8d 100644 --- a/sdk/go/databricks/metastore.go +++ b/sdk/go/databricks/metastore.go @@ -12,10 +12,6 @@ import ( 
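grants.go gains a `foreignConnection` securable to match the new `Connection` resource registered in init.go above. A small sketch of granting on a connection, assuming an existing connection named `mysql_conn` and an account group named `Data Engineers`; the privilege name is illustrative:

```go
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Grant privileges on a Unity Catalog connection (a "foreign connection" securable).
		_, err := databricks.NewGrants(ctx, "mysqlConnGrants", &databricks.GrantsArgs{
			ForeignConnection: pulumi.String("mysql_conn"), // name of an existing connection
			Grants: databricks.GrantsGrantArray{
				&databricks.GrantsGrantArgs{
					Principal: pulumi.String("Data Engineers"),
					// Privilege names are illustrative; consult the Unity Catalog privilege list.
					Privileges: pulumi.StringArray{
						pulumi.String("USE_CONNECTION"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
```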
"github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) -// > **Notes** -// -// Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. -// // A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. // // Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). @@ -45,15 +41,18 @@ type Metastore struct { // Destroy metastore regardless of its contents. ForceDestroy pulumi.BoolPtrOutput `pulumi:"forceDestroy"` GlobalMetastoreId pulumi.StringOutput `pulumi:"globalMetastoreId"` + MetastoreId pulumi.StringOutput `pulumi:"metastoreId"` // Name of metastore. Name pulumi.StringOutput `pulumi:"name"` // Username/groupname/sp applicationId of the metastore owner. - Owner pulumi.StringOutput `pulumi:"owner"` + Owner pulumi.StringOutput `pulumi:"owner"` + // The region of the metastore Region pulumi.StringOutput `pulumi:"region"` // Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. - StorageRoot pulumi.StringOutput `pulumi:"storageRoot"` - UpdatedAt pulumi.IntOutput `pulumi:"updatedAt"` - UpdatedBy pulumi.StringOutput `pulumi:"updatedBy"` + StorageRoot pulumi.StringOutput `pulumi:"storageRoot"` + StorageRootCredentialId pulumi.StringPtrOutput `pulumi:"storageRootCredentialId"` + UpdatedAt pulumi.IntOutput `pulumi:"updatedAt"` + UpdatedBy pulumi.StringOutput `pulumi:"updatedBy"` } // NewMetastore registers a new resource with the given unique name, arguments, and options. @@ -102,15 +101,18 @@ type metastoreState struct { // Destroy metastore regardless of its contents. ForceDestroy *bool `pulumi:"forceDestroy"` GlobalMetastoreId *string `pulumi:"globalMetastoreId"` + MetastoreId *string `pulumi:"metastoreId"` // Name of metastore. Name *string `pulumi:"name"` // Username/groupname/sp applicationId of the metastore owner. - Owner *string `pulumi:"owner"` + Owner *string `pulumi:"owner"` + // The region of the metastore Region *string `pulumi:"region"` // Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. - StorageRoot *string `pulumi:"storageRoot"` - UpdatedAt *int `pulumi:"updatedAt"` - UpdatedBy *string `pulumi:"updatedBy"` + StorageRoot *string `pulumi:"storageRoot"` + StorageRootCredentialId *string `pulumi:"storageRootCredentialId"` + UpdatedAt *int `pulumi:"updatedAt"` + UpdatedBy *string `pulumi:"updatedBy"` } type MetastoreState struct { @@ -127,15 +129,18 @@ type MetastoreState struct { // Destroy metastore regardless of its contents. ForceDestroy pulumi.BoolPtrInput GlobalMetastoreId pulumi.StringPtrInput + MetastoreId pulumi.StringPtrInput // Name of metastore. Name pulumi.StringPtrInput // Username/groupname/sp applicationId of the metastore owner. - Owner pulumi.StringPtrInput + Owner pulumi.StringPtrInput + // The region of the metastore Region pulumi.StringPtrInput // Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. 
- StorageRoot pulumi.StringPtrInput - UpdatedAt pulumi.IntPtrInput - UpdatedBy pulumi.StringPtrInput + StorageRoot pulumi.StringPtrInput + StorageRootCredentialId pulumi.StringPtrInput + UpdatedAt pulumi.IntPtrInput + UpdatedBy pulumi.StringPtrInput } func (MetastoreState) ElementType() reflect.Type { @@ -156,15 +161,18 @@ type metastoreArgs struct { // Destroy metastore regardless of its contents. ForceDestroy *bool `pulumi:"forceDestroy"` GlobalMetastoreId *string `pulumi:"globalMetastoreId"` + MetastoreId *string `pulumi:"metastoreId"` // Name of metastore. Name *string `pulumi:"name"` // Username/groupname/sp applicationId of the metastore owner. - Owner *string `pulumi:"owner"` + Owner *string `pulumi:"owner"` + // The region of the metastore Region *string `pulumi:"region"` // Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. - StorageRoot string `pulumi:"storageRoot"` - UpdatedAt *int `pulumi:"updatedAt"` - UpdatedBy *string `pulumi:"updatedBy"` + StorageRoot string `pulumi:"storageRoot"` + StorageRootCredentialId *string `pulumi:"storageRootCredentialId"` + UpdatedAt *int `pulumi:"updatedAt"` + UpdatedBy *string `pulumi:"updatedBy"` } // The set of arguments for constructing a Metastore resource. @@ -182,15 +190,18 @@ type MetastoreArgs struct { // Destroy metastore regardless of its contents. ForceDestroy pulumi.BoolPtrInput GlobalMetastoreId pulumi.StringPtrInput + MetastoreId pulumi.StringPtrInput // Name of metastore. Name pulumi.StringPtrInput // Username/groupname/sp applicationId of the metastore owner. - Owner pulumi.StringPtrInput + Owner pulumi.StringPtrInput + // The region of the metastore Region pulumi.StringPtrInput // Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. - StorageRoot pulumi.StringInput - UpdatedAt pulumi.IntPtrInput - UpdatedBy pulumi.StringPtrInput + StorageRoot pulumi.StringInput + StorageRootCredentialId pulumi.StringPtrInput + UpdatedAt pulumi.IntPtrInput + UpdatedBy pulumi.StringPtrInput } func (MetastoreArgs) ElementType() reflect.Type { @@ -320,6 +331,10 @@ func (o MetastoreOutput) GlobalMetastoreId() pulumi.StringOutput { return o.ApplyT(func(v *Metastore) pulumi.StringOutput { return v.GlobalMetastoreId }).(pulumi.StringOutput) } +func (o MetastoreOutput) MetastoreId() pulumi.StringOutput { + return o.ApplyT(func(v *Metastore) pulumi.StringOutput { return v.MetastoreId }).(pulumi.StringOutput) +} + // Name of metastore. 
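The Metastore arguments above now carry an explicit `region` and an optional `storageRootCredentialId`, alongside the computed `metastoreId` output. A brief sketch using placeholder bucket and credential values:

```go
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		metastore, err := databricks.NewMetastore(ctx, "this", &databricks.MetastoreArgs{
			Name:        pulumi.String("primary"),
			StorageRoot: pulumi.String("s3://my-metastore-bucket/metastore"),
			Region:      pulumi.String("us-east-1"),
			// Optional: ID of a storage credential to use for the storage root.
			StorageRootCredentialId: pulumi.String("12345678-1234-1234-1234-123456789012"), // placeholder
			Owner:                   pulumi.String("uc admins"),
			ForceDestroy:            pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		// The computed metastore ID is now exposed directly on the resource.
		ctx.Export("metastoreId", metastore.MetastoreId)
		return nil
	})
}
```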
func (o MetastoreOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v *Metastore) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) @@ -330,6 +345,7 @@ func (o MetastoreOutput) Owner() pulumi.StringOutput { return o.ApplyT(func(v *Metastore) pulumi.StringOutput { return v.Owner }).(pulumi.StringOutput) } +// The region of the metastore func (o MetastoreOutput) Region() pulumi.StringOutput { return o.ApplyT(func(v *Metastore) pulumi.StringOutput { return v.Region }).(pulumi.StringOutput) } @@ -339,6 +355,10 @@ func (o MetastoreOutput) StorageRoot() pulumi.StringOutput { return o.ApplyT(func(v *Metastore) pulumi.StringOutput { return v.StorageRoot }).(pulumi.StringOutput) } +func (o MetastoreOutput) StorageRootCredentialId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Metastore) pulumi.StringPtrOutput { return v.StorageRootCredentialId }).(pulumi.StringPtrOutput) +} + func (o MetastoreOutput) UpdatedAt() pulumi.IntOutput { return o.ApplyT(func(v *Metastore) pulumi.IntOutput { return v.UpdatedAt }).(pulumi.IntOutput) } diff --git a/sdk/go/databricks/metastoreAssignment.go b/sdk/go/databricks/metastoreAssignment.go index fdeb8620..2744fb94 100644 --- a/sdk/go/databricks/metastoreAssignment.go +++ b/sdk/go/databricks/metastoreAssignment.go @@ -33,6 +33,7 @@ import ( // thisMetastore, err := databricks.NewMetastore(ctx, "thisMetastore", &databricks.MetastoreArgs{ // StorageRoot: pulumi.String(fmt.Sprintf("s3://%v/metastore", aws_s3_bucket.Metastore.Id)), // Owner: pulumi.String("uc admins"), +// Region: pulumi.String("us-east-1"), // ForceDestroy: pulumi.Bool(true), // }) // if err != nil { diff --git a/sdk/go/databricks/mlflowModel.go b/sdk/go/databricks/mlflowModel.go index c75d4aa2..4fe54407 100644 --- a/sdk/go/databricks/mlflowModel.go +++ b/sdk/go/databricks/mlflowModel.go @@ -76,16 +76,15 @@ import ( type MlflowModel struct { pulumi.CustomResourceState - CreationTimestamp pulumi.IntOutput `pulumi:"creationTimestamp"` + CreationTimestamp pulumi.IntPtrOutput `pulumi:"creationTimestamp"` // The description of the MLflow model. Description pulumi.StringPtrOutput `pulumi:"description"` - LastUpdatedTimestamp pulumi.IntOutput `pulumi:"lastUpdatedTimestamp"` + LastUpdatedTimestamp pulumi.IntPtrOutput `pulumi:"lastUpdatedTimestamp"` // Name of MLflow model. Change of name triggers new resource. - Name pulumi.StringOutput `pulumi:"name"` - RegisteredModelId pulumi.StringOutput `pulumi:"registeredModelId"` + Name pulumi.StringOutput `pulumi:"name"` // Tags for the MLflow model. Tags MlflowModelTagArrayOutput `pulumi:"tags"` - UserId pulumi.StringOutput `pulumi:"userId"` + UserId pulumi.StringPtrOutput `pulumi:"userId"` } // NewMlflowModel registers a new resource with the given unique name, arguments, and options. @@ -123,8 +122,7 @@ type mlflowModelState struct { Description *string `pulumi:"description"` LastUpdatedTimestamp *int `pulumi:"lastUpdatedTimestamp"` // Name of MLflow model. Change of name triggers new resource. - Name *string `pulumi:"name"` - RegisteredModelId *string `pulumi:"registeredModelId"` + Name *string `pulumi:"name"` // Tags for the MLflow model. Tags []MlflowModelTag `pulumi:"tags"` UserId *string `pulumi:"userId"` @@ -136,8 +134,7 @@ type MlflowModelState struct { Description pulumi.StringPtrInput LastUpdatedTimestamp pulumi.IntPtrInput // Name of MLflow model. Change of name triggers new resource. - Name pulumi.StringPtrInput - RegisteredModelId pulumi.StringPtrInput + Name pulumi.StringPtrInput // Tags for the MLflow model. 
Tags MlflowModelTagArrayInput UserId pulumi.StringPtrInput @@ -153,8 +150,7 @@ type mlflowModelArgs struct { Description *string `pulumi:"description"` LastUpdatedTimestamp *int `pulumi:"lastUpdatedTimestamp"` // Name of MLflow model. Change of name triggers new resource. - Name *string `pulumi:"name"` - RegisteredModelId *string `pulumi:"registeredModelId"` + Name *string `pulumi:"name"` // Tags for the MLflow model. Tags []MlflowModelTag `pulumi:"tags"` UserId *string `pulumi:"userId"` @@ -167,8 +163,7 @@ type MlflowModelArgs struct { Description pulumi.StringPtrInput LastUpdatedTimestamp pulumi.IntPtrInput // Name of MLflow model. Change of name triggers new resource. - Name pulumi.StringPtrInput - RegisteredModelId pulumi.StringPtrInput + Name pulumi.StringPtrInput // Tags for the MLflow model. Tags MlflowModelTagArrayInput UserId pulumi.StringPtrInput @@ -261,8 +256,8 @@ func (o MlflowModelOutput) ToMlflowModelOutputWithContext(ctx context.Context) M return o } -func (o MlflowModelOutput) CreationTimestamp() pulumi.IntOutput { - return o.ApplyT(func(v *MlflowModel) pulumi.IntOutput { return v.CreationTimestamp }).(pulumi.IntOutput) +func (o MlflowModelOutput) CreationTimestamp() pulumi.IntPtrOutput { + return o.ApplyT(func(v *MlflowModel) pulumi.IntPtrOutput { return v.CreationTimestamp }).(pulumi.IntPtrOutput) } // The description of the MLflow model. @@ -270,8 +265,8 @@ func (o MlflowModelOutput) Description() pulumi.StringPtrOutput { return o.ApplyT(func(v *MlflowModel) pulumi.StringPtrOutput { return v.Description }).(pulumi.StringPtrOutput) } -func (o MlflowModelOutput) LastUpdatedTimestamp() pulumi.IntOutput { - return o.ApplyT(func(v *MlflowModel) pulumi.IntOutput { return v.LastUpdatedTimestamp }).(pulumi.IntOutput) +func (o MlflowModelOutput) LastUpdatedTimestamp() pulumi.IntPtrOutput { + return o.ApplyT(func(v *MlflowModel) pulumi.IntPtrOutput { return v.LastUpdatedTimestamp }).(pulumi.IntPtrOutput) } // Name of MLflow model. Change of name triggers new resource. @@ -279,17 +274,13 @@ func (o MlflowModelOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v *MlflowModel) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) } -func (o MlflowModelOutput) RegisteredModelId() pulumi.StringOutput { - return o.ApplyT(func(v *MlflowModel) pulumi.StringOutput { return v.RegisteredModelId }).(pulumi.StringOutput) -} - // Tags for the MLflow model. func (o MlflowModelOutput) Tags() MlflowModelTagArrayOutput { return o.ApplyT(func(v *MlflowModel) MlflowModelTagArrayOutput { return v.Tags }).(MlflowModelTagArrayOutput) } -func (o MlflowModelOutput) UserId() pulumi.StringOutput { - return o.ApplyT(func(v *MlflowModel) pulumi.StringOutput { return v.UserId }).(pulumi.StringOutput) +func (o MlflowModelOutput) UserId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *MlflowModel) pulumi.StringPtrOutput { return v.UserId }).(pulumi.StringPtrOutput) } type MlflowModelArrayOutput struct{ *pulumi.OutputState } diff --git a/sdk/go/databricks/mwsCustomerManagedKeys.go b/sdk/go/databricks/mwsCustomerManagedKeys.go index 88ace109..53d94e08 100644 --- a/sdk/go/databricks/mwsCustomerManagedKeys.go +++ b/sdk/go/databricks/mwsCustomerManagedKeys.go @@ -15,9 +15,11 @@ import ( // ## Example Usage // // > **Note** If you've used the resource before, please add `useCases = ["MANAGED_SERVICES"]` to keep the previous behaviour. 
+// // ### Customer-managed key for managed services // -// # You must configure this during workspace creation +// You must configure this during workspace creation +// ### For AWS // // ```go // package main @@ -114,7 +116,43 @@ import ( // }) // } // ``` +// ### For GCP +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-databricks/sdk/go/databricks" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi/config" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// cfg := config.New(ctx, "") +// databricksAccountId := cfg.RequireObject("databricksAccountId") +// cmekResourceId := cfg.RequireObject("cmekResourceId") +// _, err := databricks.NewMwsCustomerManagedKeys(ctx, "managedServices", &databricks.MwsCustomerManagedKeysArgs{ +// AccountId: pulumi.Any(databricksAccountId), +// GcpKeyInfo: &databricks.MwsCustomerManagedKeysGcpKeyInfoArgs{ +// KmsKeyId: pulumi.Any(cmekResourceId), +// }, +// UseCases: pulumi.StringArray{ +// pulumi.String("MANAGED_SERVICES"), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` // ### Customer-managed key for workspace storage +// ### For AWS // // ```go // package main @@ -269,6 +307,41 @@ import ( // }) // } // ``` +// ### For GCP +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-databricks/sdk/go/databricks" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi/config" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// cfg := config.New(ctx, "") +// databricksAccountId := cfg.RequireObject("databricksAccountId") +// cmekResourceId := cfg.RequireObject("cmekResourceId") +// _, err := databricks.NewMwsCustomerManagedKeys(ctx, "storage", &databricks.MwsCustomerManagedKeysArgs{ +// AccountId: pulumi.Any(databricksAccountId), +// GcpKeyInfo: &databricks.MwsCustomerManagedKeysGcpKeyInfoArgs{ +// KmsKeyId: pulumi.Any(cmekResourceId), +// }, +// UseCases: pulumi.StringArray{ +// pulumi.String("STORAGE"), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` // ## Related Resources // // The following resources are used in the same context: @@ -288,13 +361,14 @@ type MwsCustomerManagedKeys struct { // Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/) AccountId pulumi.StringOutput `pulumi:"accountId"` - // This field is a block and is documented below. + // This field is a block and is documented below. This conflicts with `gcpKeyInfo` AwsKeyInfo MwsCustomerManagedKeysAwsKeyInfoPtrOutput `pulumi:"awsKeyInfo"` // (Integer) Time in epoch milliseconds when the customer key was created. CreationTime pulumi.IntOutput `pulumi:"creationTime"` // (String) ID of the encryption key configuration object. - CustomerManagedKeyId pulumi.StringOutput `pulumi:"customerManagedKeyId"` - GcpKeyInfo MwsCustomerManagedKeysGcpKeyInfoPtrOutput `pulumi:"gcpKeyInfo"` + CustomerManagedKeyId pulumi.StringOutput `pulumi:"customerManagedKeyId"` + // This field is a block and is documented below. This conflicts with `awsKeyInfo` + GcpKeyInfo MwsCustomerManagedKeysGcpKeyInfoPtrOutput `pulumi:"gcpKeyInfo"` // *(since v0.3.4)* List of use cases for which this key will be used. 
*If you've used the resource before, please add `useCases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: UseCases pulumi.StringArrayOutput `pulumi:"useCases"` } @@ -337,13 +411,14 @@ func GetMwsCustomerManagedKeys(ctx *pulumi.Context, type mwsCustomerManagedKeysState struct { // Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/) AccountId *string `pulumi:"accountId"` - // This field is a block and is documented below. + // This field is a block and is documented below. This conflicts with `gcpKeyInfo` AwsKeyInfo *MwsCustomerManagedKeysAwsKeyInfo `pulumi:"awsKeyInfo"` // (Integer) Time in epoch milliseconds when the customer key was created. CreationTime *int `pulumi:"creationTime"` // (String) ID of the encryption key configuration object. - CustomerManagedKeyId *string `pulumi:"customerManagedKeyId"` - GcpKeyInfo *MwsCustomerManagedKeysGcpKeyInfo `pulumi:"gcpKeyInfo"` + CustomerManagedKeyId *string `pulumi:"customerManagedKeyId"` + // This field is a block and is documented below. This conflicts with `awsKeyInfo` + GcpKeyInfo *MwsCustomerManagedKeysGcpKeyInfo `pulumi:"gcpKeyInfo"` // *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `useCases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: UseCases []string `pulumi:"useCases"` } @@ -351,13 +426,14 @@ type mwsCustomerManagedKeysState struct { type MwsCustomerManagedKeysState struct { // Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/) AccountId pulumi.StringPtrInput - // This field is a block and is documented below. + // This field is a block and is documented below. This conflicts with `gcpKeyInfo` AwsKeyInfo MwsCustomerManagedKeysAwsKeyInfoPtrInput // (Integer) Time in epoch milliseconds when the customer key was created. CreationTime pulumi.IntPtrInput // (String) ID of the encryption key configuration object. CustomerManagedKeyId pulumi.StringPtrInput - GcpKeyInfo MwsCustomerManagedKeysGcpKeyInfoPtrInput + // This field is a block and is documented below. This conflicts with `awsKeyInfo` + GcpKeyInfo MwsCustomerManagedKeysGcpKeyInfoPtrInput // *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `useCases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: UseCases pulumi.StringArrayInput } @@ -369,13 +445,14 @@ func (MwsCustomerManagedKeysState) ElementType() reflect.Type { type mwsCustomerManagedKeysArgs struct { // Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/) AccountId string `pulumi:"accountId"` - // This field is a block and is documented below. + // This field is a block and is documented below. This conflicts with `gcpKeyInfo` AwsKeyInfo *MwsCustomerManagedKeysAwsKeyInfo `pulumi:"awsKeyInfo"` // (Integer) Time in epoch milliseconds when the customer key was created. CreationTime *int `pulumi:"creationTime"` // (String) ID of the encryption key configuration object. - CustomerManagedKeyId *string `pulumi:"customerManagedKeyId"` - GcpKeyInfo *MwsCustomerManagedKeysGcpKeyInfo `pulumi:"gcpKeyInfo"` + CustomerManagedKeyId *string `pulumi:"customerManagedKeyId"` + // This field is a block and is documented below. 
This conflicts with `awsKeyInfo` + GcpKeyInfo *MwsCustomerManagedKeysGcpKeyInfo `pulumi:"gcpKeyInfo"` // *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `useCases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: UseCases []string `pulumi:"useCases"` } @@ -384,13 +461,14 @@ type mwsCustomerManagedKeysArgs struct { type MwsCustomerManagedKeysArgs struct { // Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/) AccountId pulumi.StringInput - // This field is a block and is documented below. + // This field is a block and is documented below. This conflicts with `gcpKeyInfo` AwsKeyInfo MwsCustomerManagedKeysAwsKeyInfoPtrInput // (Integer) Time in epoch milliseconds when the customer key was created. CreationTime pulumi.IntPtrInput // (String) ID of the encryption key configuration object. CustomerManagedKeyId pulumi.StringPtrInput - GcpKeyInfo MwsCustomerManagedKeysGcpKeyInfoPtrInput + // This field is a block and is documented below. This conflicts with `awsKeyInfo` + GcpKeyInfo MwsCustomerManagedKeysGcpKeyInfoPtrInput // *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `useCases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: UseCases pulumi.StringArrayInput } @@ -487,7 +565,7 @@ func (o MwsCustomerManagedKeysOutput) AccountId() pulumi.StringOutput { return o.ApplyT(func(v *MwsCustomerManagedKeys) pulumi.StringOutput { return v.AccountId }).(pulumi.StringOutput) } -// This field is a block and is documented below. +// This field is a block and is documented below. This conflicts with `gcpKeyInfo` func (o MwsCustomerManagedKeysOutput) AwsKeyInfo() MwsCustomerManagedKeysAwsKeyInfoPtrOutput { return o.ApplyT(func(v *MwsCustomerManagedKeys) MwsCustomerManagedKeysAwsKeyInfoPtrOutput { return v.AwsKeyInfo }).(MwsCustomerManagedKeysAwsKeyInfoPtrOutput) } @@ -502,6 +580,7 @@ func (o MwsCustomerManagedKeysOutput) CustomerManagedKeyId() pulumi.StringOutput return o.ApplyT(func(v *MwsCustomerManagedKeys) pulumi.StringOutput { return v.CustomerManagedKeyId }).(pulumi.StringOutput) } +// This field is a block and is documented below. This conflicts with `awsKeyInfo` func (o MwsCustomerManagedKeysOutput) GcpKeyInfo() MwsCustomerManagedKeysGcpKeyInfoPtrOutput { return o.ApplyT(func(v *MwsCustomerManagedKeys) MwsCustomerManagedKeysGcpKeyInfoPtrOutput { return v.GcpKeyInfo }).(MwsCustomerManagedKeysGcpKeyInfoPtrOutput) } diff --git a/sdk/go/databricks/mwsWorkspaces.go b/sdk/go/databricks/mwsWorkspaces.go index a6ba880a..a3c7218b 100644 --- a/sdk/go/databricks/mwsWorkspaces.go +++ b/sdk/go/databricks/mwsWorkspaces.go @@ -47,7 +47,8 @@ type MwsWorkspaces struct { // Canonical unique identifier of MwsPrivateAccessSettings in Databricks Account. PrivateAccessSettingsId pulumi.StringPtrOutput `pulumi:"privateAccessSettingsId"` // `storageConfigurationId` from storage configuration. - StorageConfigurationId pulumi.StringPtrOutput `pulumi:"storageConfigurationId"` + StorageConfigurationId pulumi.StringPtrOutput `pulumi:"storageConfigurationId"` + // `customerManagedKeyId` from customer managed keys with `useCases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. 
StorageCustomerManagedKeyId pulumi.StringPtrOutput `pulumi:"storageCustomerManagedKeyId"` Token MwsWorkspacesTokenPtrOutput `pulumi:"token"` // (String) workspace id @@ -134,7 +135,8 @@ type mwsWorkspacesState struct { // Canonical unique identifier of MwsPrivateAccessSettings in Databricks Account. PrivateAccessSettingsId *string `pulumi:"privateAccessSettingsId"` // `storageConfigurationId` from storage configuration. - StorageConfigurationId *string `pulumi:"storageConfigurationId"` + StorageConfigurationId *string `pulumi:"storageConfigurationId"` + // `customerManagedKeyId` from customer managed keys with `useCases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. StorageCustomerManagedKeyId *string `pulumi:"storageCustomerManagedKeyId"` Token *MwsWorkspacesToken `pulumi:"token"` // (String) workspace id @@ -179,7 +181,8 @@ type MwsWorkspacesState struct { // Canonical unique identifier of MwsPrivateAccessSettings in Databricks Account. PrivateAccessSettingsId pulumi.StringPtrInput // `storageConfigurationId` from storage configuration. - StorageConfigurationId pulumi.StringPtrInput + StorageConfigurationId pulumi.StringPtrInput + // `customerManagedKeyId` from customer managed keys with `useCases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. StorageCustomerManagedKeyId pulumi.StringPtrInput Token MwsWorkspacesTokenPtrInput // (String) workspace id @@ -228,7 +231,8 @@ type mwsWorkspacesArgs struct { // Canonical unique identifier of MwsPrivateAccessSettings in Databricks Account. PrivateAccessSettingsId *string `pulumi:"privateAccessSettingsId"` // `storageConfigurationId` from storage configuration. - StorageConfigurationId *string `pulumi:"storageConfigurationId"` + StorageConfigurationId *string `pulumi:"storageConfigurationId"` + // `customerManagedKeyId` from customer managed keys with `useCases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. StorageCustomerManagedKeyId *string `pulumi:"storageCustomerManagedKeyId"` Token *MwsWorkspacesToken `pulumi:"token"` // (String) workspace id @@ -274,7 +278,8 @@ type MwsWorkspacesArgs struct { // Canonical unique identifier of MwsPrivateAccessSettings in Databricks Account. PrivateAccessSettingsId pulumi.StringPtrInput // `storageConfigurationId` from storage configuration. - StorageConfigurationId pulumi.StringPtrInput + StorageConfigurationId pulumi.StringPtrInput + // `customerManagedKeyId` from customer managed keys with `useCases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. StorageCustomerManagedKeyId pulumi.StringPtrInput Token MwsWorkspacesTokenPtrInput // (String) workspace id @@ -460,6 +465,7 @@ func (o MwsWorkspacesOutput) StorageConfigurationId() pulumi.StringPtrOutput { return o.ApplyT(func(v *MwsWorkspaces) pulumi.StringPtrOutput { return v.StorageConfigurationId }).(pulumi.StringPtrOutput) } +// `customerManagedKeyId` from customer managed keys with `useCases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. 
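As the new comments above spell out, `storageCustomerManagedKeyId` on mwsWorkspaces.go expects the `customerManagedKeyId` of a key whose `useCases` include `STORAGE`. A short sketch of producing that value, mirroring the GCP example earlier in this patch, and exporting it for use in `MwsWorkspacesArgs`:

```go
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		cfg := config.New(ctx, "")
		databricksAccountId := cfg.Require("databricksAccountId")
		cmekResourceId := cfg.Require("cmekResourceId")
		// Customer-managed key dedicated to workspace storage (DBFS root and cluster volumes).
		storageKey, err := databricks.NewMwsCustomerManagedKeys(ctx, "storage", &databricks.MwsCustomerManagedKeysArgs{
			AccountId: pulumi.String(databricksAccountId),
			GcpKeyInfo: &databricks.MwsCustomerManagedKeysGcpKeyInfoArgs{
				KmsKeyId: pulumi.String(cmekResourceId),
			},
			UseCases: pulumi.StringArray{pulumi.String("STORAGE")},
		})
		if err != nil {
			return err
		}
		// This ID is what MwsWorkspacesArgs.StorageCustomerManagedKeyId expects.
		ctx.Export("storageCustomerManagedKeyId", storageKey.CustomerManagedKeyId)
		return nil
	})
}
```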
func (o MwsWorkspacesOutput) StorageCustomerManagedKeyId() pulumi.StringPtrOutput { return o.ApplyT(func(v *MwsWorkspaces) pulumi.StringPtrOutput { return v.StorageCustomerManagedKeyId }).(pulumi.StringPtrOutput) } diff --git a/sdk/go/databricks/pulumiTypes.go b/sdk/go/databricks/pulumiTypes.go index 93b083e4..5dbef946 100644 --- a/sdk/go/databricks/pulumiTypes.go +++ b/sdk/go/databricks/pulumiTypes.go @@ -19,9 +19,10 @@ type AccessControlRuleSetGrantRule struct { // * `groups/{groupname}` (also exposed as `aclPrincipalId` attribute of `Group` resource). // * `servicePrincipals/{applicationId}` (also exposed as `aclPrincipalId` attribute of `ServicePrincipal` resource). Principals []string `pulumi:"principals"` - // Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + // Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). // * `roles/servicePrincipal.manager` - Manager of a service principal. // * `roles/servicePrincipal.user` - User of a service principal. + // * `roles/group.manager` - Manager of a group. Role string `pulumi:"role"` } @@ -42,9 +43,10 @@ type AccessControlRuleSetGrantRuleArgs struct { // * `groups/{groupname}` (also exposed as `aclPrincipalId` attribute of `Group` resource). // * `servicePrincipals/{applicationId}` (also exposed as `aclPrincipalId` attribute of `ServicePrincipal` resource). Principals pulumi.StringArrayInput `pulumi:"principals"` - // Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + // Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). // * `roles/servicePrincipal.manager` - Manager of a service principal. // * `roles/servicePrincipal.user` - User of a service principal. + // * `roles/group.manager` - Manager of a group. Role pulumi.StringInput `pulumi:"role"` } @@ -107,9 +109,10 @@ func (o AccessControlRuleSetGrantRuleOutput) Principals() pulumi.StringArrayOutp return o.ApplyT(func(v AccessControlRuleSetGrantRule) []string { return v.Principals }).(pulumi.StringArrayOutput) } -// Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). +// Role to be granted. The supported roles are listed below. 
For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). // * `roles/servicePrincipal.manager` - Manager of a service principal. // * `roles/servicePrincipal.user` - User of a service principal. +// * `roles/group.manager` - Manager of a group. func (o AccessControlRuleSetGrantRuleOutput) Role() pulumi.StringOutput { return o.ApplyT(func(v AccessControlRuleSetGrantRule) string { return v.Role }).(pulumi.StringOutput) } @@ -3734,6 +3737,289 @@ func (o ClusterWorkloadTypeClientsPtrOutput) Notebooks() pulumi.BoolPtrOutput { }).(pulumi.BoolPtrOutput) } +type ExternalLocationEncryptionDetails struct { + SseEncryptionDetails *ExternalLocationEncryptionDetailsSseEncryptionDetails `pulumi:"sseEncryptionDetails"` +} + +// ExternalLocationEncryptionDetailsInput is an input type that accepts ExternalLocationEncryptionDetailsArgs and ExternalLocationEncryptionDetailsOutput values. +// You can construct a concrete instance of `ExternalLocationEncryptionDetailsInput` via: +// +// ExternalLocationEncryptionDetailsArgs{...} +type ExternalLocationEncryptionDetailsInput interface { + pulumi.Input + + ToExternalLocationEncryptionDetailsOutput() ExternalLocationEncryptionDetailsOutput + ToExternalLocationEncryptionDetailsOutputWithContext(context.Context) ExternalLocationEncryptionDetailsOutput +} + +type ExternalLocationEncryptionDetailsArgs struct { + SseEncryptionDetails ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrInput `pulumi:"sseEncryptionDetails"` +} + +func (ExternalLocationEncryptionDetailsArgs) ElementType() reflect.Type { + return reflect.TypeOf((*ExternalLocationEncryptionDetails)(nil)).Elem() +} + +func (i ExternalLocationEncryptionDetailsArgs) ToExternalLocationEncryptionDetailsOutput() ExternalLocationEncryptionDetailsOutput { + return i.ToExternalLocationEncryptionDetailsOutputWithContext(context.Background()) +} + +func (i ExternalLocationEncryptionDetailsArgs) ToExternalLocationEncryptionDetailsOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsOutput { + return pulumi.ToOutputWithContext(ctx, i).(ExternalLocationEncryptionDetailsOutput) +} + +func (i ExternalLocationEncryptionDetailsArgs) ToExternalLocationEncryptionDetailsPtrOutput() ExternalLocationEncryptionDetailsPtrOutput { + return i.ToExternalLocationEncryptionDetailsPtrOutputWithContext(context.Background()) +} + +func (i ExternalLocationEncryptionDetailsArgs) ToExternalLocationEncryptionDetailsPtrOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(ExternalLocationEncryptionDetailsOutput).ToExternalLocationEncryptionDetailsPtrOutputWithContext(ctx) +} + +// ExternalLocationEncryptionDetailsPtrInput is an input type that accepts ExternalLocationEncryptionDetailsArgs, ExternalLocationEncryptionDetailsPtr and ExternalLocationEncryptionDetailsPtrOutput values. 
+// You can construct a concrete instance of `ExternalLocationEncryptionDetailsPtrInput` via: +// +// ExternalLocationEncryptionDetailsArgs{...} +// +// or: +// +// nil +type ExternalLocationEncryptionDetailsPtrInput interface { + pulumi.Input + + ToExternalLocationEncryptionDetailsPtrOutput() ExternalLocationEncryptionDetailsPtrOutput + ToExternalLocationEncryptionDetailsPtrOutputWithContext(context.Context) ExternalLocationEncryptionDetailsPtrOutput +} + +type externalLocationEncryptionDetailsPtrType ExternalLocationEncryptionDetailsArgs + +func ExternalLocationEncryptionDetailsPtr(v *ExternalLocationEncryptionDetailsArgs) ExternalLocationEncryptionDetailsPtrInput { + return (*externalLocationEncryptionDetailsPtrType)(v) +} + +func (*externalLocationEncryptionDetailsPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**ExternalLocationEncryptionDetails)(nil)).Elem() +} + +func (i *externalLocationEncryptionDetailsPtrType) ToExternalLocationEncryptionDetailsPtrOutput() ExternalLocationEncryptionDetailsPtrOutput { + return i.ToExternalLocationEncryptionDetailsPtrOutputWithContext(context.Background()) +} + +func (i *externalLocationEncryptionDetailsPtrType) ToExternalLocationEncryptionDetailsPtrOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(ExternalLocationEncryptionDetailsPtrOutput) +} + +type ExternalLocationEncryptionDetailsOutput struct{ *pulumi.OutputState } + +func (ExternalLocationEncryptionDetailsOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ExternalLocationEncryptionDetails)(nil)).Elem() +} + +func (o ExternalLocationEncryptionDetailsOutput) ToExternalLocationEncryptionDetailsOutput() ExternalLocationEncryptionDetailsOutput { + return o +} + +func (o ExternalLocationEncryptionDetailsOutput) ToExternalLocationEncryptionDetailsOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsOutput { + return o +} + +func (o ExternalLocationEncryptionDetailsOutput) ToExternalLocationEncryptionDetailsPtrOutput() ExternalLocationEncryptionDetailsPtrOutput { + return o.ToExternalLocationEncryptionDetailsPtrOutputWithContext(context.Background()) +} + +func (o ExternalLocationEncryptionDetailsOutput) ToExternalLocationEncryptionDetailsPtrOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v ExternalLocationEncryptionDetails) *ExternalLocationEncryptionDetails { + return &v + }).(ExternalLocationEncryptionDetailsPtrOutput) +} + +func (o ExternalLocationEncryptionDetailsOutput) SseEncryptionDetails() ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput { + return o.ApplyT(func(v ExternalLocationEncryptionDetails) *ExternalLocationEncryptionDetailsSseEncryptionDetails { + return v.SseEncryptionDetails + }).(ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput) +} + +type ExternalLocationEncryptionDetailsPtrOutput struct{ *pulumi.OutputState } + +func (ExternalLocationEncryptionDetailsPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**ExternalLocationEncryptionDetails)(nil)).Elem() +} + +func (o ExternalLocationEncryptionDetailsPtrOutput) ToExternalLocationEncryptionDetailsPtrOutput() ExternalLocationEncryptionDetailsPtrOutput { + return o +} + +func (o ExternalLocationEncryptionDetailsPtrOutput) ToExternalLocationEncryptionDetailsPtrOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsPtrOutput { + return o +} + +func (o 
ExternalLocationEncryptionDetailsPtrOutput) Elem() ExternalLocationEncryptionDetailsOutput { + return o.ApplyT(func(v *ExternalLocationEncryptionDetails) ExternalLocationEncryptionDetails { + if v != nil { + return *v + } + var ret ExternalLocationEncryptionDetails + return ret + }).(ExternalLocationEncryptionDetailsOutput) +} + +func (o ExternalLocationEncryptionDetailsPtrOutput) SseEncryptionDetails() ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput { + return o.ApplyT(func(v *ExternalLocationEncryptionDetails) *ExternalLocationEncryptionDetailsSseEncryptionDetails { + if v == nil { + return nil + } + return v.SseEncryptionDetails + }).(ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput) +} + +type ExternalLocationEncryptionDetailsSseEncryptionDetails struct { + Algorithm *string `pulumi:"algorithm"` + AwsKmsKeyArn *string `pulumi:"awsKmsKeyArn"` +} + +// ExternalLocationEncryptionDetailsSseEncryptionDetailsInput is an input type that accepts ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs and ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput values. +// You can construct a concrete instance of `ExternalLocationEncryptionDetailsSseEncryptionDetailsInput` via: +// +// ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs{...} +type ExternalLocationEncryptionDetailsSseEncryptionDetailsInput interface { + pulumi.Input + + ToExternalLocationEncryptionDetailsSseEncryptionDetailsOutput() ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput + ToExternalLocationEncryptionDetailsSseEncryptionDetailsOutputWithContext(context.Context) ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput +} + +type ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs struct { + Algorithm pulumi.StringPtrInput `pulumi:"algorithm"` + AwsKmsKeyArn pulumi.StringPtrInput `pulumi:"awsKmsKeyArn"` +} + +func (ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs) ElementType() reflect.Type { + return reflect.TypeOf((*ExternalLocationEncryptionDetailsSseEncryptionDetails)(nil)).Elem() +} + +func (i ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs) ToExternalLocationEncryptionDetailsSseEncryptionDetailsOutput() ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput { + return i.ToExternalLocationEncryptionDetailsSseEncryptionDetailsOutputWithContext(context.Background()) +} + +func (i ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs) ToExternalLocationEncryptionDetailsSseEncryptionDetailsOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput { + return pulumi.ToOutputWithContext(ctx, i).(ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput) +} + +func (i ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs) ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput() ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput { + return i.ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutputWithContext(context.Background()) +} + +func (i ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs) ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput).ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutputWithContext(ctx) +} + +// ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrInput is an input type that accepts 
ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs, ExternalLocationEncryptionDetailsSseEncryptionDetailsPtr and ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput values. +// You can construct a concrete instance of `ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrInput` via: +// +// ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs{...} +// +// or: +// +// nil +type ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrInput interface { + pulumi.Input + + ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput() ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput + ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutputWithContext(context.Context) ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput +} + +type externalLocationEncryptionDetailsSseEncryptionDetailsPtrType ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs + +func ExternalLocationEncryptionDetailsSseEncryptionDetailsPtr(v *ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs) ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrInput { + return (*externalLocationEncryptionDetailsSseEncryptionDetailsPtrType)(v) +} + +func (*externalLocationEncryptionDetailsSseEncryptionDetailsPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**ExternalLocationEncryptionDetailsSseEncryptionDetails)(nil)).Elem() +} + +func (i *externalLocationEncryptionDetailsSseEncryptionDetailsPtrType) ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput() ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput { + return i.ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutputWithContext(context.Background()) +} + +func (i *externalLocationEncryptionDetailsSseEncryptionDetailsPtrType) ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput) +} + +type ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput struct{ *pulumi.OutputState } + +func (ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ExternalLocationEncryptionDetailsSseEncryptionDetails)(nil)).Elem() +} + +func (o ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput) ToExternalLocationEncryptionDetailsSseEncryptionDetailsOutput() ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput { + return o +} + +func (o ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput) ToExternalLocationEncryptionDetailsSseEncryptionDetailsOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput { + return o +} + +func (o ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput) ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput() ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput { + return o.ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutputWithContext(context.Background()) +} + +func (o ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput) ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v ExternalLocationEncryptionDetailsSseEncryptionDetails) *ExternalLocationEncryptionDetailsSseEncryptionDetails { + return &v + 
}).(ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput) +} + +func (o ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput) Algorithm() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExternalLocationEncryptionDetailsSseEncryptionDetails) *string { return v.Algorithm }).(pulumi.StringPtrOutput) +} + +func (o ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput) AwsKmsKeyArn() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExternalLocationEncryptionDetailsSseEncryptionDetails) *string { return v.AwsKmsKeyArn }).(pulumi.StringPtrOutput) +} + +type ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput struct{ *pulumi.OutputState } + +func (ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**ExternalLocationEncryptionDetailsSseEncryptionDetails)(nil)).Elem() +} + +func (o ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput) ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput() ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput { + return o +} + +func (o ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput) ToExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutputWithContext(ctx context.Context) ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput { + return o +} + +func (o ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput) Elem() ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput { + return o.ApplyT(func(v *ExternalLocationEncryptionDetailsSseEncryptionDetails) ExternalLocationEncryptionDetailsSseEncryptionDetails { + if v != nil { + return *v + } + var ret ExternalLocationEncryptionDetailsSseEncryptionDetails + return ret + }).(ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput) +} + +func (o ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput) Algorithm() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExternalLocationEncryptionDetailsSseEncryptionDetails) *string { + if v == nil { + return nil + } + return v.Algorithm + }).(pulumi.StringPtrOutput) +} + +func (o ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput) AwsKmsKeyArn() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExternalLocationEncryptionDetailsSseEncryptionDetails) *string { + if v == nil { + return nil + } + return v.AwsKmsKeyArn + }).(pulumi.StringPtrOutput) +} + type GrantsGrant struct { Principal string `pulumi:"principal"` Privileges []string `pulumi:"privileges"` @@ -16366,7 +16652,7 @@ func (o JobRunAsPtrOutput) UserName() pulumi.StringPtrOutput { type JobRunJobTask struct { // (String) ID of the job - JobId string `pulumi:"jobId"` + JobId int `pulumi:"jobId"` // (Map) Job parameters for the task JobParameters map[string]interface{} `pulumi:"jobParameters"` } @@ -16384,7 +16670,7 @@ type JobRunJobTaskInput interface { type JobRunJobTaskArgs struct { // (String) ID of the job - JobId pulumi.StringInput `pulumi:"jobId"` + JobId pulumi.IntInput `pulumi:"jobId"` // (Map) Job parameters for the task JobParameters pulumi.MapInput `pulumi:"jobParameters"` } @@ -16467,8 +16753,8 @@ func (o JobRunJobTaskOutput) ToJobRunJobTaskPtrOutputWithContext(ctx context.Con } // (String) ID of the job -func (o JobRunJobTaskOutput) JobId() pulumi.StringOutput { - return o.ApplyT(func(v JobRunJobTask) string { return v.JobId }).(pulumi.StringOutput) +func (o JobRunJobTaskOutput) JobId() pulumi.IntOutput { + return o.ApplyT(func(v JobRunJobTask) int { return v.JobId }).(pulumi.IntOutput) } // (Map) Job parameters for 
the task @@ -16501,13 +16787,13 @@ func (o JobRunJobTaskPtrOutput) Elem() JobRunJobTaskOutput { } // (String) ID of the job -func (o JobRunJobTaskPtrOutput) JobId() pulumi.StringPtrOutput { - return o.ApplyT(func(v *JobRunJobTask) *string { +func (o JobRunJobTaskPtrOutput) JobId() pulumi.IntPtrOutput { + return o.ApplyT(func(v *JobRunJobTask) *int { if v == nil { return nil } return &v.JobId - }).(pulumi.StringPtrOutput) + }).(pulumi.IntPtrOutput) } // (Map) Job parameters for the task @@ -23525,7 +23811,7 @@ func (o JobTaskPythonWheelTaskPtrOutput) Parameters() pulumi.StringArrayOutput { type JobTaskRunJobTask struct { // (String) ID of the job - JobId string `pulumi:"jobId"` + JobId int `pulumi:"jobId"` // (Map) Job parameters for the task JobParameters map[string]interface{} `pulumi:"jobParameters"` } @@ -23543,7 +23829,7 @@ type JobTaskRunJobTaskInput interface { type JobTaskRunJobTaskArgs struct { // (String) ID of the job - JobId pulumi.StringInput `pulumi:"jobId"` + JobId pulumi.IntInput `pulumi:"jobId"` // (Map) Job parameters for the task JobParameters pulumi.MapInput `pulumi:"jobParameters"` } @@ -23626,8 +23912,8 @@ func (o JobTaskRunJobTaskOutput) ToJobTaskRunJobTaskPtrOutputWithContext(ctx con } // (String) ID of the job -func (o JobTaskRunJobTaskOutput) JobId() pulumi.StringOutput { - return o.ApplyT(func(v JobTaskRunJobTask) string { return v.JobId }).(pulumi.StringOutput) +func (o JobTaskRunJobTaskOutput) JobId() pulumi.IntOutput { + return o.ApplyT(func(v JobTaskRunJobTask) int { return v.JobId }).(pulumi.IntOutput) } // (Map) Job parameters for the task @@ -23660,13 +23946,13 @@ func (o JobTaskRunJobTaskPtrOutput) Elem() JobTaskRunJobTaskOutput { } // (String) ID of the job -func (o JobTaskRunJobTaskPtrOutput) JobId() pulumi.StringPtrOutput { - return o.ApplyT(func(v *JobTaskRunJobTask) *string { +func (o JobTaskRunJobTaskPtrOutput) JobId() pulumi.IntPtrOutput { + return o.ApplyT(func(v *JobTaskRunJobTask) *int { if v == nil { return nil } return &v.JobId - }).(pulumi.StringPtrOutput) + }).(pulumi.IntPtrOutput) } // (Map) Job parameters for the task @@ -26964,7 +27250,7 @@ func (o LibraryPypiPtrOutput) Repo() pulumi.StringPtrOutput { type MetastoreDataAccessAwsIamRole struct { // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` // - // `azureServicePrincipal` optional configuration block for credential details for Azure: + // `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure (Recommended): RoleArn string `pulumi:"roleArn"` } @@ -26982,7 +27268,7 @@ type MetastoreDataAccessAwsIamRoleInput interface { type MetastoreDataAccessAwsIamRoleArgs struct { // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` // - // `azureServicePrincipal` optional configuration block for credential details for Azure: + // `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure (Recommended): RoleArn pulumi.StringInput `pulumi:"roleArn"` } @@ -27065,7 +27351,7 @@ func (o MetastoreDataAccessAwsIamRoleOutput) ToMetastoreDataAccessAwsIamRolePtrO // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` // -// `azureServicePrincipal` optional configuration block for credential details for Azure: +// `azureManagedIdentity` optional configuration block 
for using managed identity as credential details for Azure (Recommended): func (o MetastoreDataAccessAwsIamRoleOutput) RoleArn() pulumi.StringOutput { return o.ApplyT(func(v MetastoreDataAccessAwsIamRole) string { return v.RoleArn }).(pulumi.StringOutput) } @@ -27096,7 +27382,7 @@ func (o MetastoreDataAccessAwsIamRolePtrOutput) Elem() MetastoreDataAccessAwsIam // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` // -// `azureServicePrincipal` optional configuration block for credential details for Azure: +// `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure (Recommended): func (o MetastoreDataAccessAwsIamRolePtrOutput) RoleArn() pulumi.StringPtrOutput { return o.ApplyT(func(v *MetastoreDataAccessAwsIamRole) *string { if v == nil { @@ -27255,8 +27541,6 @@ type MetastoreDataAccessAzureServicePrincipal struct { // The application ID of the application registration within the referenced AAD tenant ApplicationId string `pulumi:"applicationId"` // The client secret generated for the above app ID in AAD. **This field is redacted on output** - // - // `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure: ClientSecret string `pulumi:"clientSecret"` // The directory ID corresponding to the Azure Active Directory (AAD) tenant of the application DirectoryId string `pulumi:"directoryId"` @@ -27277,8 +27561,6 @@ type MetastoreDataAccessAzureServicePrincipalArgs struct { // The application ID of the application registration within the referenced AAD tenant ApplicationId pulumi.StringInput `pulumi:"applicationId"` // The client secret generated for the above app ID in AAD. **This field is redacted on output** - // - // `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure: ClientSecret pulumi.StringInput `pulumi:"clientSecret"` // The directory ID corresponding to the Azure Active Directory (AAD) tenant of the application DirectoryId pulumi.StringInput `pulumi:"directoryId"` @@ -27367,8 +27649,6 @@ func (o MetastoreDataAccessAzureServicePrincipalOutput) ApplicationId() pulumi.S } // The client secret generated for the above app ID in AAD. **This field is redacted on output** -// -// `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure: func (o MetastoreDataAccessAzureServicePrincipalOutput) ClientSecret() pulumi.StringOutput { return o.ApplyT(func(v MetastoreDataAccessAzureServicePrincipal) string { return v.ClientSecret }).(pulumi.StringOutput) } @@ -27413,8 +27693,6 @@ func (o MetastoreDataAccessAzureServicePrincipalPtrOutput) ApplicationId() pulum } // The client secret generated for the above app ID in AAD. **This field is redacted on output** -// -// `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure: func (o MetastoreDataAccessAzureServicePrincipalPtrOutput) ClientSecret() pulumi.StringPtrOutput { return o.ApplyT(func(v *MetastoreDataAccessAzureServicePrincipal) *string { if v == nil { @@ -27436,6 +27714,8 @@ func (o MetastoreDataAccessAzureServicePrincipalPtrOutput) DirectoryId() pulumi. type MetastoreDataAccessDatabricksGcpServiceAccount struct { // The email of the GCP service account created, to be granted access to relevant buckets. 
+ // + // `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): Email *string `pulumi:"email"` } @@ -27452,6 +27732,8 @@ type MetastoreDataAccessDatabricksGcpServiceAccountInput interface { type MetastoreDataAccessDatabricksGcpServiceAccountArgs struct { // The email of the GCP service account created, to be granted access to relevant buckets. + // + // `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): Email pulumi.StringPtrInput `pulumi:"email"` } @@ -27533,6 +27815,8 @@ func (o MetastoreDataAccessDatabricksGcpServiceAccountOutput) ToMetastoreDataAcc } // The email of the GCP service account created, to be granted access to relevant buckets. +// +// `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): func (o MetastoreDataAccessDatabricksGcpServiceAccountOutput) Email() pulumi.StringPtrOutput { return o.ApplyT(func(v MetastoreDataAccessDatabricksGcpServiceAccount) *string { return v.Email }).(pulumi.StringPtrOutput) } @@ -27562,6 +27846,8 @@ func (o MetastoreDataAccessDatabricksGcpServiceAccountPtrOutput) Elem() Metastor } // The email of the GCP service account created, to be granted access to relevant buckets. +// +// `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): func (o MetastoreDataAccessDatabricksGcpServiceAccountPtrOutput) Email() pulumi.StringPtrOutput { return o.ApplyT(func(v *MetastoreDataAccessDatabricksGcpServiceAccount) *string { if v == nil { @@ -27573,6 +27859,8 @@ func (o MetastoreDataAccessDatabricksGcpServiceAccountPtrOutput) Email() pulumi. type MetastoreDataAccessGcpServiceAccountKey struct { // The email of the GCP service account created, to be granted access to relevant buckets. + // + // `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): Email string `pulumi:"email"` PrivateKey string `pulumi:"privateKey"` PrivateKeyId string `pulumi:"privateKeyId"` @@ -27591,6 +27879,8 @@ type MetastoreDataAccessGcpServiceAccountKeyInput interface { type MetastoreDataAccessGcpServiceAccountKeyArgs struct { // The email of the GCP service account created, to be granted access to relevant buckets. + // + // `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): Email pulumi.StringInput `pulumi:"email"` PrivateKey pulumi.StringInput `pulumi:"privateKey"` PrivateKeyId pulumi.StringInput `pulumi:"privateKeyId"` @@ -27674,6 +27964,8 @@ func (o MetastoreDataAccessGcpServiceAccountKeyOutput) ToMetastoreDataAccessGcpS } // The email of the GCP service account created, to be granted access to relevant buckets. +// +// `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): func (o MetastoreDataAccessGcpServiceAccountKeyOutput) Email() pulumi.StringOutput { return o.ApplyT(func(v MetastoreDataAccessGcpServiceAccountKey) string { return v.Email }).(pulumi.StringOutput) } @@ -27711,6 +28003,8 @@ func (o MetastoreDataAccessGcpServiceAccountKeyPtrOutput) Elem() MetastoreDataAc } // The email of the GCP service account created, to be granted access to relevant buckets. 
+// +// `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): func (o MetastoreDataAccessGcpServiceAccountKeyPtrOutput) Email() pulumi.StringPtrOutput { return o.ApplyT(func(v *MetastoreDataAccessGcpServiceAccountKey) *string { if v == nil { @@ -27739,8 +28033,8 @@ func (o MetastoreDataAccessGcpServiceAccountKeyPtrOutput) PrivateKeyId() pulumi. } type MlflowModelTag struct { - Key string `pulumi:"key"` - Value string `pulumi:"value"` + Key *string `pulumi:"key"` + Value *string `pulumi:"value"` } // MlflowModelTagInput is an input type that accepts MlflowModelTagArgs and MlflowModelTagOutput values. @@ -27755,8 +28049,8 @@ type MlflowModelTagInput interface { } type MlflowModelTagArgs struct { - Key pulumi.StringInput `pulumi:"key"` - Value pulumi.StringInput `pulumi:"value"` + Key pulumi.StringPtrInput `pulumi:"key"` + Value pulumi.StringPtrInput `pulumi:"value"` } func (MlflowModelTagArgs) ElementType() reflect.Type { @@ -27810,12 +28104,12 @@ func (o MlflowModelTagOutput) ToMlflowModelTagOutputWithContext(ctx context.Cont return o } -func (o MlflowModelTagOutput) Key() pulumi.StringOutput { - return o.ApplyT(func(v MlflowModelTag) string { return v.Key }).(pulumi.StringOutput) +func (o MlflowModelTagOutput) Key() pulumi.StringPtrOutput { + return o.ApplyT(func(v MlflowModelTag) *string { return v.Key }).(pulumi.StringPtrOutput) } -func (o MlflowModelTagOutput) Value() pulumi.StringOutput { - return o.ApplyT(func(v MlflowModelTag) string { return v.Value }).(pulumi.StringOutput) +func (o MlflowModelTagOutput) Value() pulumi.StringPtrOutput { + return o.ApplyT(func(v MlflowModelTag) *string { return v.Value }).(pulumi.StringPtrOutput) } type MlflowModelTagArrayOutput struct{ *pulumi.OutputState } @@ -28364,8 +28658,10 @@ func (o ModelServingConfigPtrOutput) TrafficConfig() ModelServingConfigTrafficCo } type ModelServingConfigServedModel struct { - EnvironmentVars map[string]interface{} `pulumi:"environmentVars"` - InstanceProfileArn *string `pulumi:"instanceProfileArn"` + // a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + EnvironmentVars map[string]interface{} `pulumi:"environmentVars"` + // ARN of the instance profile that the served model will use to access AWS resources. + InstanceProfileArn *string `pulumi:"instanceProfileArn"` // The name of the model in Databricks Model Registry to be served. ModelName string `pulumi:"modelName"` // The version of the model in Databricks Model Registry to be served. @@ -28390,7 +28686,9 @@ type ModelServingConfigServedModelInput interface { } type ModelServingConfigServedModelArgs struct { - EnvironmentVars pulumi.MapInput `pulumi:"environmentVars"` + // a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + EnvironmentVars pulumi.MapInput `pulumi:"environmentVars"` + // ARN of the instance profile that the served model will use to access AWS resources. InstanceProfileArn pulumi.StringPtrInput `pulumi:"instanceProfileArn"` // The name of the model in Databricks Model Registry to be served. 
ModelName pulumi.StringInput `pulumi:"modelName"` @@ -28455,10 +28753,12 @@ func (o ModelServingConfigServedModelOutput) ToModelServingConfigServedModelOutp return o } +// a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. func (o ModelServingConfigServedModelOutput) EnvironmentVars() pulumi.MapOutput { return o.ApplyT(func(v ModelServingConfigServedModel) map[string]interface{} { return v.EnvironmentVars }).(pulumi.MapOutput) } +// ARN of the instance profile that the served model will use to access AWS resources. func (o ModelServingConfigServedModelOutput) InstanceProfileArn() pulumi.StringPtrOutput { return o.ApplyT(func(v ModelServingConfigServedModel) *string { return v.InstanceProfileArn }).(pulumi.StringPtrOutput) } @@ -29892,6 +30192,7 @@ func (o MwsCustomerManagedKeysAwsKeyInfoPtrOutput) KeyRegion() pulumi.StringPtrO } type MwsCustomerManagedKeysGcpKeyInfo struct { + // The GCP KMS key's resource name. KmsKeyId string `pulumi:"kmsKeyId"` } @@ -29907,6 +30208,7 @@ type MwsCustomerManagedKeysGcpKeyInfoInput interface { } type MwsCustomerManagedKeysGcpKeyInfoArgs struct { + // The GCP KMS key's resource name. KmsKeyId pulumi.StringInput `pulumi:"kmsKeyId"` } @@ -29987,6 +30289,7 @@ func (o MwsCustomerManagedKeysGcpKeyInfoOutput) ToMwsCustomerManagedKeysGcpKeyIn }).(MwsCustomerManagedKeysGcpKeyInfoPtrOutput) } +// The GCP KMS key's resource name. func (o MwsCustomerManagedKeysGcpKeyInfoOutput) KmsKeyId() pulumi.StringOutput { return o.ApplyT(func(v MwsCustomerManagedKeysGcpKeyInfo) string { return v.KmsKeyId }).(pulumi.StringOutput) } @@ -30015,6 +30318,7 @@ func (o MwsCustomerManagedKeysGcpKeyInfoPtrOutput) Elem() MwsCustomerManagedKeys }).(MwsCustomerManagedKeysGcpKeyInfoOutput) } +// The GCP KMS key's resource name. func (o MwsCustomerManagedKeysGcpKeyInfoPtrOutput) KmsKeyId() pulumi.StringPtrOutput { return o.ApplyT(func(v *MwsCustomerManagedKeysGcpKeyInfo) *string { if v == nil { @@ -31474,7 +31778,8 @@ func (o MwsWorkspacesGkeConfigPtrOutput) MasterIpRange() pulumi.StringPtrOutput } type MwsWorkspacesToken struct { - Comment *string `pulumi:"comment"` + Comment *string `pulumi:"comment"` + // Token expiry lifetime. By default its 2592000 (30 days). LifetimeSeconds *int `pulumi:"lifetimeSeconds"` TokenId *string `pulumi:"tokenId"` TokenValue *string `pulumi:"tokenValue"` @@ -31492,7 +31797,8 @@ type MwsWorkspacesTokenInput interface { } type MwsWorkspacesTokenArgs struct { - Comment pulumi.StringPtrInput `pulumi:"comment"` + Comment pulumi.StringPtrInput `pulumi:"comment"` + // Token expiry lifetime. By default its 2592000 (30 days). LifetimeSeconds pulumi.IntPtrInput `pulumi:"lifetimeSeconds"` TokenId pulumi.StringPtrInput `pulumi:"tokenId"` TokenValue pulumi.StringPtrInput `pulumi:"tokenValue"` @@ -31579,6 +31885,7 @@ func (o MwsWorkspacesTokenOutput) Comment() pulumi.StringPtrOutput { return o.ApplyT(func(v MwsWorkspacesToken) *string { return v.Comment }).(pulumi.StringPtrOutput) } +// Token expiry lifetime. By default its 2592000 (30 days). func (o MwsWorkspacesTokenOutput) LifetimeSeconds() pulumi.IntPtrOutput { return o.ApplyT(func(v MwsWorkspacesToken) *int { return v.LifetimeSeconds }).(pulumi.IntPtrOutput) } @@ -31624,6 +31931,7 @@ func (o MwsWorkspacesTokenPtrOutput) Comment() pulumi.StringPtrOutput { }).(pulumi.StringPtrOutput) } +// Token expiry lifetime. By default its 2592000 (30 days). 
func (o MwsWorkspacesTokenPtrOutput) LifetimeSeconds() pulumi.IntPtrOutput { return o.ApplyT(func(v *MwsWorkspacesToken) *int { if v == nil { @@ -39965,8 +40273,8 @@ type SqlTableColumn struct { Name string `pulumi:"name"` // Whether field is nullable (Default: `true`) Nullable *bool `pulumi:"nullable"` - // Column type spec (with metadata) as SQL text - Type string `pulumi:"type"` + // Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. + Type *string `pulumi:"type"` } // SqlTableColumnInput is an input type that accepts SqlTableColumnArgs and SqlTableColumnOutput values. @@ -39987,8 +40295,8 @@ type SqlTableColumnArgs struct { Name pulumi.StringInput `pulumi:"name"` // Whether field is nullable (Default: `true`) Nullable pulumi.BoolPtrInput `pulumi:"nullable"` - // Column type spec (with metadata) as SQL text - Type pulumi.StringInput `pulumi:"type"` + // Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. + Type pulumi.StringPtrInput `pulumi:"type"` } func (SqlTableColumnArgs) ElementType() reflect.Type { @@ -40057,9 +40365,9 @@ func (o SqlTableColumnOutput) Nullable() pulumi.BoolPtrOutput { return o.ApplyT(func(v SqlTableColumn) *bool { return v.Nullable }).(pulumi.BoolPtrOutput) } -// Column type spec (with metadata) as SQL text -func (o SqlTableColumnOutput) Type() pulumi.StringOutput { - return o.ApplyT(func(v SqlTableColumn) string { return v.Type }).(pulumi.StringOutput) +// Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. +func (o SqlTableColumnOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v SqlTableColumn) *string { return v.Type }).(pulumi.StringPtrOutput) } type SqlTableColumnArrayOutput struct{ *pulumi.OutputState } @@ -59045,7 +59353,7 @@ func (o GetJobJobSettingsSettingsRunAsPtrOutput) UserName() pulumi.StringPtrOutp } type GetJobJobSettingsSettingsRunJobTask struct { - JobId string `pulumi:"jobId"` + JobId int `pulumi:"jobId"` JobParameters map[string]interface{} `pulumi:"jobParameters"` } @@ -59061,8 +59369,8 @@ type GetJobJobSettingsSettingsRunJobTaskInput interface { } type GetJobJobSettingsSettingsRunJobTaskArgs struct { - JobId pulumi.StringInput `pulumi:"jobId"` - JobParameters pulumi.MapInput `pulumi:"jobParameters"` + JobId pulumi.IntInput `pulumi:"jobId"` + JobParameters pulumi.MapInput `pulumi:"jobParameters"` } func (GetJobJobSettingsSettingsRunJobTaskArgs) ElementType() reflect.Type { @@ -59142,8 +59450,8 @@ func (o GetJobJobSettingsSettingsRunJobTaskOutput) ToGetJobJobSettingsSettingsRu }).(GetJobJobSettingsSettingsRunJobTaskPtrOutput) } -func (o GetJobJobSettingsSettingsRunJobTaskOutput) JobId() pulumi.StringOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsRunJobTask) string { return v.JobId }).(pulumi.StringOutput) +func (o GetJobJobSettingsSettingsRunJobTaskOutput) JobId() pulumi.IntOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsRunJobTask) int { return v.JobId }).(pulumi.IntOutput) } func (o GetJobJobSettingsSettingsRunJobTaskOutput) JobParameters() pulumi.MapOutput { @@ -59174,13 +59482,13 @@ func (o GetJobJobSettingsSettingsRunJobTaskPtrOutput) Elem() GetJobJobSettingsSe }).(GetJobJobSettingsSettingsRunJobTaskOutput) } -func (o GetJobJobSettingsSettingsRunJobTaskPtrOutput) JobId() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsRunJobTask) *string { +func (o GetJobJobSettingsSettingsRunJobTaskPtrOutput) JobId() pulumi.IntPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsRunJobTask) 
*int { if v == nil { return nil } return &v.JobId - }).(pulumi.StringPtrOutput) + }).(pulumi.IntPtrOutput) } func (o GetJobJobSettingsSettingsRunJobTaskPtrOutput) JobParameters() pulumi.MapOutput { @@ -65857,7 +66165,7 @@ func (o GetJobJobSettingsSettingsTaskPythonWheelTaskPtrOutput) Parameters() pulu } type GetJobJobSettingsSettingsTaskRunJobTask struct { - JobId string `pulumi:"jobId"` + JobId int `pulumi:"jobId"` JobParameters map[string]interface{} `pulumi:"jobParameters"` } @@ -65873,8 +66181,8 @@ type GetJobJobSettingsSettingsTaskRunJobTaskInput interface { } type GetJobJobSettingsSettingsTaskRunJobTaskArgs struct { - JobId pulumi.StringInput `pulumi:"jobId"` - JobParameters pulumi.MapInput `pulumi:"jobParameters"` + JobId pulumi.IntInput `pulumi:"jobId"` + JobParameters pulumi.MapInput `pulumi:"jobParameters"` } func (GetJobJobSettingsSettingsTaskRunJobTaskArgs) ElementType() reflect.Type { @@ -65954,8 +66262,8 @@ func (o GetJobJobSettingsSettingsTaskRunJobTaskOutput) ToGetJobJobSettingsSettin }).(GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) } -func (o GetJobJobSettingsSettingsTaskRunJobTaskOutput) JobId() pulumi.StringOutput { - return o.ApplyT(func(v GetJobJobSettingsSettingsTaskRunJobTask) string { return v.JobId }).(pulumi.StringOutput) +func (o GetJobJobSettingsSettingsTaskRunJobTaskOutput) JobId() pulumi.IntOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsTaskRunJobTask) int { return v.JobId }).(pulumi.IntOutput) } func (o GetJobJobSettingsSettingsTaskRunJobTaskOutput) JobParameters() pulumi.MapOutput { @@ -65986,13 +66294,13 @@ func (o GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) Elem() GetJobJobSettin }).(GetJobJobSettingsSettingsTaskRunJobTaskOutput) } -func (o GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) JobId() pulumi.StringPtrOutput { - return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskRunJobTask) *string { +func (o GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) JobId() pulumi.IntPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskRunJobTask) *int { if v == nil { return nil } return &v.JobId - }).(pulumi.StringPtrOutput) + }).(pulumi.IntPtrOutput) } func (o GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) JobParameters() pulumi.MapOutput { @@ -69881,6 +70189,10 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*ClusterWorkloadTypePtrInput)(nil)).Elem(), ClusterWorkloadTypeArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterWorkloadTypeClientsInput)(nil)).Elem(), ClusterWorkloadTypeClientsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterWorkloadTypeClientsPtrInput)(nil)).Elem(), ClusterWorkloadTypeClientsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ExternalLocationEncryptionDetailsInput)(nil)).Elem(), ExternalLocationEncryptionDetailsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ExternalLocationEncryptionDetailsPtrInput)(nil)).Elem(), ExternalLocationEncryptionDetailsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ExternalLocationEncryptionDetailsSseEncryptionDetailsInput)(nil)).Elem(), ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrInput)(nil)).Elem(), ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GrantsGrantInput)(nil)).Elem(), GrantsGrantArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GrantsGrantArrayInput)(nil)).Elem(), GrantsGrantArray{}) 
pulumi.RegisterInputType(reflect.TypeOf((*InstancePoolAwsAttributesInput)(nil)).Elem(), InstancePoolAwsAttributesArgs{}) @@ -70724,6 +71036,10 @@ func init() { pulumi.RegisterOutputType(ClusterWorkloadTypePtrOutput{}) pulumi.RegisterOutputType(ClusterWorkloadTypeClientsOutput{}) pulumi.RegisterOutputType(ClusterWorkloadTypeClientsPtrOutput{}) + pulumi.RegisterOutputType(ExternalLocationEncryptionDetailsOutput{}) + pulumi.RegisterOutputType(ExternalLocationEncryptionDetailsPtrOutput{}) + pulumi.RegisterOutputType(ExternalLocationEncryptionDetailsSseEncryptionDetailsOutput{}) + pulumi.RegisterOutputType(ExternalLocationEncryptionDetailsSseEncryptionDetailsPtrOutput{}) pulumi.RegisterOutputType(GrantsGrantOutput{}) pulumi.RegisterOutputType(GrantsGrantArrayOutput{}) pulumi.RegisterOutputType(InstancePoolAwsAttributesOutput{}) diff --git a/sdk/go/databricks/share.go b/sdk/go/databricks/share.go index ff42d89f..58fb9eed 100644 --- a/sdk/go/databricks/share.go +++ b/sdk/go/databricks/share.go @@ -21,6 +21,8 @@ type Share struct { // Name of share. Change forces creation of a new resource. Name pulumi.StringOutput `pulumi:"name"` Objects ShareObjectArrayOutput `pulumi:"objects"` + // User name/group name/sp applicationId of the share owner. + Owner pulumi.StringPtrOutput `pulumi:"owner"` } // NewShare registers a new resource with the given unique name, arguments, and options. @@ -60,6 +62,8 @@ type shareState struct { // Name of share. Change forces creation of a new resource. Name *string `pulumi:"name"` Objects []ShareObject `pulumi:"objects"` + // User name/group name/sp applicationId of the share owner. + Owner *string `pulumi:"owner"` } type ShareState struct { @@ -70,6 +74,8 @@ type ShareState struct { // Name of share. Change forces creation of a new resource. Name pulumi.StringPtrInput Objects ShareObjectArrayInput + // User name/group name/sp applicationId of the share owner. + Owner pulumi.StringPtrInput } func (ShareState) ElementType() reflect.Type { @@ -84,6 +90,8 @@ type shareArgs struct { // Name of share. Change forces creation of a new resource. Name *string `pulumi:"name"` Objects []ShareObject `pulumi:"objects"` + // User name/group name/sp applicationId of the share owner. + Owner *string `pulumi:"owner"` } // The set of arguments for constructing a Share resource. @@ -95,6 +103,8 @@ type ShareArgs struct { // Name of share. Change forces creation of a new resource. Name pulumi.StringPtrInput Objects ShareObjectArrayInput + // User name/group name/sp applicationId of the share owner. + Owner pulumi.StringPtrInput } func (ShareArgs) ElementType() reflect.Type { @@ -203,6 +213,11 @@ func (o ShareOutput) Objects() ShareObjectArrayOutput { return o.ApplyT(func(v *Share) ShareObjectArrayOutput { return v.Objects }).(ShareObjectArrayOutput) } +// User name/group name/sp applicationId of the share owner. +func (o ShareOutput) Owner() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Share) pulumi.StringPtrOutput { return v.Owner }).(pulumi.StringPtrOutput) +} + type ShareArrayOutput struct{ *pulumi.OutputState } func (ShareArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/databricks/sqlAlert.go b/sdk/go/databricks/sqlAlert.go index e36b7523..77ed635a 100644 --- a/sdk/go/databricks/sqlAlert.go +++ b/sdk/go/databricks/sqlAlert.go @@ -27,6 +27,7 @@ import ( type SqlAlert struct { pulumi.CustomResourceState + CreatedAt pulumi.StringOutput `pulumi:"createdAt"` // Name of the alert. Name pulumi.StringOutput `pulumi:"name"` // Alert configuration options. 
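The hunks around here add a read-only `createdAt`/`updatedAt` pair to `SqlAlert` (and an `owner` field to `Share`). A minimal Go sketch of how the new `SqlAlert` outputs could be consumed, assuming an existing SQL query ID and the documented `options` block fields (`column`, `op`, `value`); the IDs and values below are placeholders, not taken from this patch:

```go
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Placeholder query ID; in a real program this would come from a
		// databricks.SqlQuery resource defined elsewhere.
		alert, err := databricks.NewSqlAlert(ctx, "exampleAlert", &databricks.SqlAlertArgs{
			Name:    pulumi.String("example-alert"),
			QueryId: pulumi.String("00000000-0000-0000-0000-000000000000"),
			Options: &databricks.SqlAlertOptionsArgs{
				Column: pulumi.String("value"),
				Op:     pulumi.String(">"),
				Value:  pulumi.String("42"),
			},
		})
		if err != nil {
			return err
		}
		// createdAt/updatedAt are the new read-only outputs added in this release.
		ctx.Export("alertCreatedAt", alert.CreatedAt)
		ctx.Export("alertUpdatedAt", alert.UpdatedAt)
		return nil
	})
}
```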
@@ -36,7 +37,8 @@ type SqlAlert struct { // ID of the query evaluated by the alert. QueryId pulumi.StringOutput `pulumi:"queryId"` // Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. - Rearm pulumi.IntPtrOutput `pulumi:"rearm"` + Rearm pulumi.IntPtrOutput `pulumi:"rearm"` + UpdatedAt pulumi.StringOutput `pulumi:"updatedAt"` } // NewSqlAlert registers a new resource with the given unique name, arguments, and options. @@ -75,6 +77,7 @@ func GetSqlAlert(ctx *pulumi.Context, // Input properties used for looking up and filtering SqlAlert resources. type sqlAlertState struct { + CreatedAt *string `pulumi:"createdAt"` // Name of the alert. Name *string `pulumi:"name"` // Alert configuration options. @@ -84,10 +87,12 @@ type sqlAlertState struct { // ID of the query evaluated by the alert. QueryId *string `pulumi:"queryId"` // Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. - Rearm *int `pulumi:"rearm"` + Rearm *int `pulumi:"rearm"` + UpdatedAt *string `pulumi:"updatedAt"` } type SqlAlertState struct { + CreatedAt pulumi.StringPtrInput // Name of the alert. Name pulumi.StringPtrInput // Alert configuration options. @@ -97,7 +102,8 @@ type SqlAlertState struct { // ID of the query evaluated by the alert. QueryId pulumi.StringPtrInput // Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. - Rearm pulumi.IntPtrInput + Rearm pulumi.IntPtrInput + UpdatedAt pulumi.StringPtrInput } func (SqlAlertState) ElementType() reflect.Type { @@ -105,6 +111,7 @@ func (SqlAlertState) ElementType() reflect.Type { } type sqlAlertArgs struct { + CreatedAt *string `pulumi:"createdAt"` // Name of the alert. Name *string `pulumi:"name"` // Alert configuration options. @@ -114,11 +121,13 @@ type sqlAlertArgs struct { // ID of the query evaluated by the alert. QueryId string `pulumi:"queryId"` // Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. - Rearm *int `pulumi:"rearm"` + Rearm *int `pulumi:"rearm"` + UpdatedAt *string `pulumi:"updatedAt"` } // The set of arguments for constructing a SqlAlert resource. type SqlAlertArgs struct { + CreatedAt pulumi.StringPtrInput // Name of the alert. Name pulumi.StringPtrInput // Alert configuration options. @@ -128,7 +137,8 @@ type SqlAlertArgs struct { // ID of the query evaluated by the alert. QueryId pulumi.StringInput // Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. - Rearm pulumi.IntPtrInput + Rearm pulumi.IntPtrInput + UpdatedAt pulumi.StringPtrInput } func (SqlAlertArgs) ElementType() reflect.Type { @@ -218,6 +228,10 @@ func (o SqlAlertOutput) ToSqlAlertOutputWithContext(ctx context.Context) SqlAler return o } +func (o SqlAlertOutput) CreatedAt() pulumi.StringOutput { + return o.ApplyT(func(v *SqlAlert) pulumi.StringOutput { return v.CreatedAt }).(pulumi.StringOutput) +} + // Name of the alert. 
func (o SqlAlertOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v *SqlAlert) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) @@ -243,6 +257,10 @@ func (o SqlAlertOutput) Rearm() pulumi.IntPtrOutput { return o.ApplyT(func(v *SqlAlert) pulumi.IntPtrOutput { return v.Rearm }).(pulumi.IntPtrOutput) } +func (o SqlAlertOutput) UpdatedAt() pulumi.StringOutput { + return o.ApplyT(func(v *SqlAlert) pulumi.StringOutput { return v.UpdatedAt }).(pulumi.StringOutput) +} + type SqlAlertArrayOutput struct{ *pulumi.OutputState } func (SqlAlertArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/databricks/sqlDashboard.go b/sdk/go/databricks/sqlDashboard.go index ae19548d..06d8571c 100644 --- a/sdk/go/databricks/sqlDashboard.go +++ b/sdk/go/databricks/sqlDashboard.go @@ -109,9 +109,11 @@ import ( type SqlDashboard struct { pulumi.CustomResourceState - Name pulumi.StringOutput `pulumi:"name"` - Parent pulumi.StringPtrOutput `pulumi:"parent"` - Tags pulumi.StringArrayOutput `pulumi:"tags"` + CreatedAt pulumi.StringOutput `pulumi:"createdAt"` + Name pulumi.StringOutput `pulumi:"name"` + Parent pulumi.StringPtrOutput `pulumi:"parent"` + Tags pulumi.StringArrayOutput `pulumi:"tags"` + UpdatedAt pulumi.StringOutput `pulumi:"updatedAt"` } // NewSqlDashboard registers a new resource with the given unique name, arguments, and options. @@ -144,15 +146,19 @@ func GetSqlDashboard(ctx *pulumi.Context, // Input properties used for looking up and filtering SqlDashboard resources. type sqlDashboardState struct { - Name *string `pulumi:"name"` - Parent *string `pulumi:"parent"` - Tags []string `pulumi:"tags"` + CreatedAt *string `pulumi:"createdAt"` + Name *string `pulumi:"name"` + Parent *string `pulumi:"parent"` + Tags []string `pulumi:"tags"` + UpdatedAt *string `pulumi:"updatedAt"` } type SqlDashboardState struct { - Name pulumi.StringPtrInput - Parent pulumi.StringPtrInput - Tags pulumi.StringArrayInput + CreatedAt pulumi.StringPtrInput + Name pulumi.StringPtrInput + Parent pulumi.StringPtrInput + Tags pulumi.StringArrayInput + UpdatedAt pulumi.StringPtrInput } func (SqlDashboardState) ElementType() reflect.Type { @@ -160,16 +166,20 @@ func (SqlDashboardState) ElementType() reflect.Type { } type sqlDashboardArgs struct { - Name *string `pulumi:"name"` - Parent *string `pulumi:"parent"` - Tags []string `pulumi:"tags"` + CreatedAt *string `pulumi:"createdAt"` + Name *string `pulumi:"name"` + Parent *string `pulumi:"parent"` + Tags []string `pulumi:"tags"` + UpdatedAt *string `pulumi:"updatedAt"` } // The set of arguments for constructing a SqlDashboard resource. 
type SqlDashboardArgs struct { - Name pulumi.StringPtrInput - Parent pulumi.StringPtrInput - Tags pulumi.StringArrayInput + CreatedAt pulumi.StringPtrInput + Name pulumi.StringPtrInput + Parent pulumi.StringPtrInput + Tags pulumi.StringArrayInput + UpdatedAt pulumi.StringPtrInput } func (SqlDashboardArgs) ElementType() reflect.Type { @@ -259,6 +269,10 @@ func (o SqlDashboardOutput) ToSqlDashboardOutputWithContext(ctx context.Context) return o } +func (o SqlDashboardOutput) CreatedAt() pulumi.StringOutput { + return o.ApplyT(func(v *SqlDashboard) pulumi.StringOutput { return v.CreatedAt }).(pulumi.StringOutput) +} + func (o SqlDashboardOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v *SqlDashboard) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) } @@ -271,6 +285,10 @@ func (o SqlDashboardOutput) Tags() pulumi.StringArrayOutput { return o.ApplyT(func(v *SqlDashboard) pulumi.StringArrayOutput { return v.Tags }).(pulumi.StringArrayOutput) } +func (o SqlDashboardOutput) UpdatedAt() pulumi.StringOutput { + return o.ApplyT(func(v *SqlDashboard) pulumi.StringOutput { return v.UpdatedAt }).(pulumi.StringOutput) +} + type SqlDashboardArrayOutput struct{ *pulumi.OutputState } func (SqlDashboardArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/databricks/sqlQuery.go b/sdk/go/databricks/sqlQuery.go index 20b80e1f..faa54ca0 100644 --- a/sdk/go/databricks/sqlQuery.go +++ b/sdk/go/databricks/sqlQuery.go @@ -158,6 +158,7 @@ import ( type SqlQuery struct { pulumi.CustomResourceState + CreatedAt pulumi.StringOutput `pulumi:"createdAt"` DataSourceId pulumi.StringOutput `pulumi:"dataSourceId"` Description pulumi.StringPtrOutput `pulumi:"description"` Name pulumi.StringOutput `pulumi:"name"` @@ -166,8 +167,9 @@ type SqlQuery struct { Query pulumi.StringOutput `pulumi:"query"` RunAsRole pulumi.StringPtrOutput `pulumi:"runAsRole"` // Deprecated: Operations on `databricks_sql_query` schedules are deprecated. Please use `databricks_job` resource to schedule a `sql_task`. - Schedule SqlQuerySchedulePtrOutput `pulumi:"schedule"` - Tags pulumi.StringArrayOutput `pulumi:"tags"` + Schedule SqlQuerySchedulePtrOutput `pulumi:"schedule"` + Tags pulumi.StringArrayOutput `pulumi:"tags"` + UpdatedAt pulumi.StringOutput `pulumi:"updatedAt"` } // NewSqlQuery registers a new resource with the given unique name, arguments, and options. @@ -206,6 +208,7 @@ func GetSqlQuery(ctx *pulumi.Context, // Input properties used for looking up and filtering SqlQuery resources. type sqlQueryState struct { + CreatedAt *string `pulumi:"createdAt"` DataSourceId *string `pulumi:"dataSourceId"` Description *string `pulumi:"description"` Name *string `pulumi:"name"` @@ -214,11 +217,13 @@ type sqlQueryState struct { Query *string `pulumi:"query"` RunAsRole *string `pulumi:"runAsRole"` // Deprecated: Operations on `databricks_sql_query` schedules are deprecated. Please use `databricks_job` resource to schedule a `sql_task`. - Schedule *SqlQuerySchedule `pulumi:"schedule"` - Tags []string `pulumi:"tags"` + Schedule *SqlQuerySchedule `pulumi:"schedule"` + Tags []string `pulumi:"tags"` + UpdatedAt *string `pulumi:"updatedAt"` } type SqlQueryState struct { + CreatedAt pulumi.StringPtrInput DataSourceId pulumi.StringPtrInput Description pulumi.StringPtrInput Name pulumi.StringPtrInput @@ -227,8 +232,9 @@ type SqlQueryState struct { Query pulumi.StringPtrInput RunAsRole pulumi.StringPtrInput // Deprecated: Operations on `databricks_sql_query` schedules are deprecated. 
Please use `databricks_job` resource to schedule a `sql_task`. - Schedule SqlQuerySchedulePtrInput - Tags pulumi.StringArrayInput + Schedule SqlQuerySchedulePtrInput + Tags pulumi.StringArrayInput + UpdatedAt pulumi.StringPtrInput } func (SqlQueryState) ElementType() reflect.Type { @@ -236,6 +242,7 @@ func (SqlQueryState) ElementType() reflect.Type { } type sqlQueryArgs struct { + CreatedAt *string `pulumi:"createdAt"` DataSourceId string `pulumi:"dataSourceId"` Description *string `pulumi:"description"` Name *string `pulumi:"name"` @@ -244,12 +251,14 @@ type sqlQueryArgs struct { Query string `pulumi:"query"` RunAsRole *string `pulumi:"runAsRole"` // Deprecated: Operations on `databricks_sql_query` schedules are deprecated. Please use `databricks_job` resource to schedule a `sql_task`. - Schedule *SqlQuerySchedule `pulumi:"schedule"` - Tags []string `pulumi:"tags"` + Schedule *SqlQuerySchedule `pulumi:"schedule"` + Tags []string `pulumi:"tags"` + UpdatedAt *string `pulumi:"updatedAt"` } // The set of arguments for constructing a SqlQuery resource. type SqlQueryArgs struct { + CreatedAt pulumi.StringPtrInput DataSourceId pulumi.StringInput Description pulumi.StringPtrInput Name pulumi.StringPtrInput @@ -258,8 +267,9 @@ type SqlQueryArgs struct { Query pulumi.StringInput RunAsRole pulumi.StringPtrInput // Deprecated: Operations on `databricks_sql_query` schedules are deprecated. Please use `databricks_job` resource to schedule a `sql_task`. - Schedule SqlQuerySchedulePtrInput - Tags pulumi.StringArrayInput + Schedule SqlQuerySchedulePtrInput + Tags pulumi.StringArrayInput + UpdatedAt pulumi.StringPtrInput } func (SqlQueryArgs) ElementType() reflect.Type { @@ -349,6 +359,10 @@ func (o SqlQueryOutput) ToSqlQueryOutputWithContext(ctx context.Context) SqlQuer return o } +func (o SqlQueryOutput) CreatedAt() pulumi.StringOutput { + return o.ApplyT(func(v *SqlQuery) pulumi.StringOutput { return v.CreatedAt }).(pulumi.StringOutput) +} + func (o SqlQueryOutput) DataSourceId() pulumi.StringOutput { return o.ApplyT(func(v *SqlQuery) pulumi.StringOutput { return v.DataSourceId }).(pulumi.StringOutput) } @@ -386,6 +400,10 @@ func (o SqlQueryOutput) Tags() pulumi.StringArrayOutput { return o.ApplyT(func(v *SqlQuery) pulumi.StringArrayOutput { return v.Tags }).(pulumi.StringArrayOutput) } +func (o SqlQueryOutput) UpdatedAt() pulumi.StringOutput { + return o.ApplyT(func(v *SqlQuery) pulumi.StringOutput { return v.UpdatedAt }).(pulumi.StringOutput) +} + type SqlQueryArrayOutput struct{ *pulumi.OutputState } func (SqlQueryArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/databricks/storageCredential.go b/sdk/go/databricks/storageCredential.go index 425cff5b..1ff1af36 100644 --- a/sdk/go/databricks/storageCredential.go +++ b/sdk/go/databricks/storageCredential.go @@ -161,6 +161,7 @@ type StorageCredential struct { AzureServicePrincipal StorageCredentialAzureServicePrincipalPtrOutput `pulumi:"azureServicePrincipal"` Comment pulumi.StringPtrOutput `pulumi:"comment"` DatabricksGcpServiceAccount StorageCredentialDatabricksGcpServiceAccountOutput `pulumi:"databricksGcpServiceAccount"` + ForceDestroy pulumi.BoolPtrOutput `pulumi:"forceDestroy"` GcpServiceAccountKey StorageCredentialGcpServiceAccountKeyPtrOutput `pulumi:"gcpServiceAccountKey"` MetastoreId pulumi.StringOutput `pulumi:"metastoreId"` // Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource. 
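The `storageCredential.go` hunks add an optional `forceDestroy` flag to the resource. A minimal Go sketch of setting it alongside an AWS IAM role credential; the role ARN is a placeholder, and the exact delete semantics are assumed from the provider's usual `force_destroy` behavior rather than anything shown in this diff:

```go
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Placeholder ARN; use an IAM role that actually has S3 access.
		_, err := databricks.NewStorageCredential(ctx, "external", &databricks.StorageCredentialArgs{
			Name:    pulumi.String("external-creds"),
			Comment: pulumi.String("Managed by Pulumi"),
			AwsIamRole: &databricks.StorageCredentialAwsIamRoleArgs{
				RoleArn: pulumi.String("arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF"),
			},
			// New optional field in this release: permit deletion even when the
			// credential is still referenced (assumed semantics).
			ForceDestroy: pulumi.Bool(true),
		})
		return err
	})
}
```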
@@ -208,6 +209,7 @@ type storageCredentialState struct { AzureServicePrincipal *StorageCredentialAzureServicePrincipal `pulumi:"azureServicePrincipal"` Comment *string `pulumi:"comment"` DatabricksGcpServiceAccount *StorageCredentialDatabricksGcpServiceAccount `pulumi:"databricksGcpServiceAccount"` + ForceDestroy *bool `pulumi:"forceDestroy"` GcpServiceAccountKey *StorageCredentialGcpServiceAccountKey `pulumi:"gcpServiceAccountKey"` MetastoreId *string `pulumi:"metastoreId"` // Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource. @@ -226,6 +228,7 @@ type StorageCredentialState struct { AzureServicePrincipal StorageCredentialAzureServicePrincipalPtrInput Comment pulumi.StringPtrInput DatabricksGcpServiceAccount StorageCredentialDatabricksGcpServiceAccountPtrInput + ForceDestroy pulumi.BoolPtrInput GcpServiceAccountKey StorageCredentialGcpServiceAccountKeyPtrInput MetastoreId pulumi.StringPtrInput // Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource. @@ -248,6 +251,7 @@ type storageCredentialArgs struct { AzureServicePrincipal *StorageCredentialAzureServicePrincipal `pulumi:"azureServicePrincipal"` Comment *string `pulumi:"comment"` DatabricksGcpServiceAccount *StorageCredentialDatabricksGcpServiceAccount `pulumi:"databricksGcpServiceAccount"` + ForceDestroy *bool `pulumi:"forceDestroy"` GcpServiceAccountKey *StorageCredentialGcpServiceAccountKey `pulumi:"gcpServiceAccountKey"` MetastoreId *string `pulumi:"metastoreId"` // Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource. @@ -267,6 +271,7 @@ type StorageCredentialArgs struct { AzureServicePrincipal StorageCredentialAzureServicePrincipalPtrInput Comment pulumi.StringPtrInput DatabricksGcpServiceAccount StorageCredentialDatabricksGcpServiceAccountPtrInput + ForceDestroy pulumi.BoolPtrInput GcpServiceAccountKey StorageCredentialGcpServiceAccountKeyPtrInput MetastoreId pulumi.StringPtrInput // Name of Storage Credentials, which must be unique within the databricks_metastore. Change forces creation of a new resource. @@ -392,6 +397,10 @@ func (o StorageCredentialOutput) DatabricksGcpServiceAccount() StorageCredential }).(StorageCredentialDatabricksGcpServiceAccountOutput) } +func (o StorageCredentialOutput) ForceDestroy() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *StorageCredential) pulumi.BoolPtrOutput { return v.ForceDestroy }).(pulumi.BoolPtrOutput) +} + func (o StorageCredentialOutput) GcpServiceAccountKey() StorageCredentialGcpServiceAccountKeyPtrOutput { return o.ApplyT(func(v *StorageCredential) StorageCredentialGcpServiceAccountKeyPtrOutput { return v.GcpServiceAccountKey diff --git a/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSet.java b/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSet.java index 950d85a7..98700398 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSet.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSet.java @@ -19,10 +19,299 @@ /** * This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. * - * > **Note** Currently, we only support managing access rules on service principal resources through `databricks.AccessControlRuleSet`. 
+ * > **Note** Currently, we only support managing access rules on service principal, group and account resources through `databricks.AccessControlRuleSet`. * * > **Warning** `databricks.AccessControlRuleSet` cannot be used to manage access rules for resources supported by databricks_permissions. Refer to its documentation for more information. * + * ## Service principal rule set usage + * + * Through a Databricks workspace: + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.DatabricksFunctions; + * import com.pulumi.databricks.inputs.GetGroupArgs; + * import com.pulumi.databricks.ServicePrincipal; + * import com.pulumi.databricks.ServicePrincipalArgs; + * import com.pulumi.databricks.AccessControlRuleSet; + * import com.pulumi.databricks.AccessControlRuleSetArgs; + * import com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var accountId = "00000000-0000-0000-0000-000000000000"; + * + * final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder() + * .displayName("Data Science") + * .build()); + * + * var automationSp = new ServicePrincipal("automationSp", ServicePrincipalArgs.builder() + * .displayName("SP_FOR_AUTOMATION") + * .build()); + * + * var automationSpRuleSet = new AccessControlRuleSet("automationSpRuleSet", AccessControlRuleSetArgs.builder() + * .grantRules(AccessControlRuleSetGrantRuleArgs.builder() + * .principals(ds.applyValue(getGroupResult -> getGroupResult.aclPrincipalId())) + * .role("roles/servicePrincipal.user") + * .build()) + * .build()); + * + * } + * } + * ``` + * + * Through AWS Databricks account: + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.Group; + * import com.pulumi.databricks.ServicePrincipal; + * import com.pulumi.databricks.ServicePrincipalArgs; + * import com.pulumi.databricks.AccessControlRuleSet; + * import com.pulumi.databricks.AccessControlRuleSetArgs; + * import com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var accountId = "00000000-0000-0000-0000-000000000000"; + * + * var ds = new Group("ds"); + * + * var automationSp = new ServicePrincipal("automationSp", ServicePrincipalArgs.builder() + * .displayName("SP_FOR_AUTOMATION") + * .build()); + * + * var automationSpRuleSet = new AccessControlRuleSet("automationSpRuleSet", AccessControlRuleSetArgs.builder() + * .grantRules(AccessControlRuleSetGrantRuleArgs.builder() + * .principals(ds.aclPrincipalId()) + * .role("roles/servicePrincipal.user") + * .build()) + * .build()); + * + * } + * } + * ``` + * + * Through Azure Databricks account: + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * 
import com.pulumi.core.Output; + * import com.pulumi.databricks.Group; + * import com.pulumi.databricks.ServicePrincipal; + * import com.pulumi.databricks.ServicePrincipalArgs; + * import com.pulumi.databricks.AccessControlRuleSet; + * import com.pulumi.databricks.AccessControlRuleSetArgs; + * import com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var accountId = "00000000-0000-0000-0000-000000000000"; + * + * var ds = new Group("ds"); + * + * var automationSp = new ServicePrincipal("automationSp", ServicePrincipalArgs.builder() + * .applicationId("00000000-0000-0000-0000-000000000000") + * .displayName("SP_FOR_AUTOMATION") + * .build()); + * + * var automationSpRuleSet = new AccessControlRuleSet("automationSpRuleSet", AccessControlRuleSetArgs.builder() + * .grantRules(AccessControlRuleSetGrantRuleArgs.builder() + * .principals(ds.aclPrincipalId()) + * .role("roles/servicePrincipal.user") + * .build()) + * .build()); + * + * } + * } + * ``` + * + * Through GCP Databricks account: + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.Group; + * import com.pulumi.databricks.ServicePrincipal; + * import com.pulumi.databricks.ServicePrincipalArgs; + * import com.pulumi.databricks.AccessControlRuleSet; + * import com.pulumi.databricks.AccessControlRuleSetArgs; + * import com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var accountId = "00000000-0000-0000-0000-000000000000"; + * + * var ds = new Group("ds"); + * + * var automationSp = new ServicePrincipal("automationSp", ServicePrincipalArgs.builder() + * .displayName("SP_FOR_AUTOMATION") + * .build()); + * + * var automationSpRuleSet = new AccessControlRuleSet("automationSpRuleSet", AccessControlRuleSetArgs.builder() + * .grantRules(AccessControlRuleSetGrantRuleArgs.builder() + * .principals(ds.aclPrincipalId()) + * .role("roles/servicePrincipal.user") + * .build()) + * .build()); + * + * } + * } + * ``` + * + * ## Group rule set usage + * + * Refer to the appropriate provider configuration as shown in the examples for service principal rule set. 
+ * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.DatabricksFunctions; + * import com.pulumi.databricks.inputs.GetGroupArgs; + * import com.pulumi.databricks.inputs.GetUserArgs; + * import com.pulumi.databricks.AccessControlRuleSet; + * import com.pulumi.databricks.AccessControlRuleSetArgs; + * import com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var accountId = "00000000-0000-0000-0000-000000000000"; + * + * final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder() + * .displayName("Data Science") + * .build()); + * + * final var john = DatabricksFunctions.getUser(GetUserArgs.builder() + * .userName("john.doe@example.com") + * .build()); + * + * var dsGroupRuleSet = new AccessControlRuleSet("dsGroupRuleSet", AccessControlRuleSetArgs.builder() + * .grantRules(AccessControlRuleSetGrantRuleArgs.builder() + * .principals(john.applyValue(getUserResult -> getUserResult.aclPrincipalId())) + * .role("roles/group.manager") + * .build()) + * .build()); + * + * } + * } + * ``` + * + * ## Account rule set usage + * + * Refer to the appropriate provider configuration as shown in the examples for service principal rule set. + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.DatabricksFunctions; + * import com.pulumi.databricks.inputs.GetGroupArgs; + * import com.pulumi.databricks.inputs.GetUserArgs; + * import com.pulumi.databricks.AccessControlRuleSet; + * import com.pulumi.databricks.AccessControlRuleSetArgs; + * import com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var accountId = "00000000-0000-0000-0000-000000000000"; + * + * final var ds = DatabricksFunctions.getGroup(GetGroupArgs.builder() + * .displayName("Data Science") + * .build()); + * + * final var john = DatabricksFunctions.getUser(GetUserArgs.builder() + * .userName("john.doe@example.com") + * .build()); + * + * var accountRuleSet = new AccessControlRuleSet("accountRuleSet", AccessControlRuleSetArgs.builder() + * .grantRules( + * AccessControlRuleSetGrantRuleArgs.builder() + * .principals(john.applyValue(getUserResult -> getUserResult.aclPrincipalId())) + * .role("roles/group.manager") + * .build(), + * AccessControlRuleSetGrantRuleArgs.builder() + * .principals(data.databricks_user().ds().acl_principal_id()) + * .role("roles/servicePrincipal.manager") + * .build()) + * .build()); + * + * } + * } + * ``` + * * ## Related Resources * * The following resources are often used in the same context: @@ -61,6 +350,8 @@ public Output>> grantRules() { /** * Unique identifier of a rule set. The name determines the resource to which the rule set applies. 
Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` * */ @Export(name="name", type=String.class, parameters={}) @@ -69,6 +360,8 @@ public Output>> grantRules() { /** * @return Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` * */ public Output name() { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSetArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSetArgs.java index 4adb6e4b..b4894e7e 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSetArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/AccessControlRuleSetArgs.java @@ -39,6 +39,8 @@ public Optional>> grantRules() { /** * Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` * */ @Import(name="name") @@ -47,6 +49,8 @@ public Optional>> grantRules() { /** * @return Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` * */ public Optional> name() { @@ -118,6 +122,8 @@ public Builder grantRules(AccessControlRuleSetGrantRuleArgs... grantRules) { /** * @param name Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` * * @return builder * @@ -130,6 +136,8 @@ public Builder name(@Nullable Output name) { /** * @param name Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. 
The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Catalog.java b/sdk/java/src/main/java/com/pulumi/databricks/Catalog.java index 34c11020..9cc6cd9c 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Catalog.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Catalog.java @@ -82,6 +82,20 @@ public class Catalog extends com.pulumi.resources.CustomResource { public Output> comment() { return Codegen.optional(this.comment); } + /** + * For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + * + */ + @Export(name="connectionName", type=String.class, parameters={}) + private Output connectionName; + + /** + * @return For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + * + */ + public Output> connectionName() { + return Codegen.optional(this.connectionName); + } /** * Delete catalog regardless of its contents. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/CatalogArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/CatalogArgs.java index ccaefe08..cc68703f 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/CatalogArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/CatalogArgs.java @@ -33,6 +33,21 @@ public Optional> comment() { return Optional.ofNullable(this.comment); } + /** + * For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + * + */ + @Import(name="connectionName") + private @Nullable Output connectionName; + + /** + * @return For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + * + */ + public Optional> connectionName() { + return Optional.ofNullable(this.connectionName); + } + /** * Delete catalog regardless of its contents. * @@ -164,6 +179,7 @@ private CatalogArgs() {} private CatalogArgs(CatalogArgs $) { this.comment = $.comment; + this.connectionName = $.connectionName; this.forceDestroy = $.forceDestroy; this.isolationMode = $.isolationMode; this.metastoreId = $.metastoreId; @@ -214,6 +230,27 @@ public Builder comment(String comment) { return comment(Output.of(comment)); } + /** + * @param connectionName For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + * + * @return builder + * + */ + public Builder connectionName(@Nullable Output connectionName) { + $.connectionName = connectionName; + return this; + } + + /** + * @param connectionName For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + * + * @return builder + * + */ + public Builder connectionName(String connectionName) { + return connectionName(Output.of(connectionName)); + } + /** * @param forceDestroy Delete catalog regardless of its contents. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Connection.java b/sdk/java/src/main/java/com/pulumi/databricks/Connection.java new file mode 100644 index 00000000..277e7950 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/Connection.java @@ -0,0 +1,226 @@ +// *** WARNING: this file was generated by pulumi-java-gen. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Export; +import com.pulumi.core.annotations.ResourceType; +import com.pulumi.core.internal.Codegen; +import com.pulumi.databricks.ConnectionArgs; +import com.pulumi.databricks.Utilities; +import com.pulumi.databricks.inputs.ConnectionState; +import java.lang.Boolean; +import java.lang.Object; +import java.lang.String; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import javax.annotation.Nullable; + +/** + * Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: + * + * - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. + * - A foreign catalog + * + * This resource manages connections in Unity Catalog + * + * ## Example Usage + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.Connection; + * import com.pulumi.databricks.ConnectionArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * var mysql = new Connection("mysql", ConnectionArgs.builder() + * .comment("this is a connection to mysql db") + * .connectionType("MYSQL") + * .options(Map.ofEntries( + * Map.entry("host", "test.mysql.database.azure.com"), + * Map.entry("password", "password"), + * Map.entry("port", "3306"), + * Map.entry("user", "user") + * )) + * .properties(Map.of("purpose", "testing")) + * .build()); + * + * } + * } + * ``` + * + * ## Import + * + * This resource can be imported by `name` bash + * + * ```sh + * $ pulumi import databricks:index/connection:Connection this <connection_name> + * ``` + * + */ +@ResourceType(type="databricks:index/connection:Connection") +public class Connection extends com.pulumi.resources.CustomResource { + /** + * Free-form text. + * + */ + @Export(name="comment", type=String.class, parameters={}) + private Output comment; + + /** + * @return Free-form text. + * + */ + public Output> comment() { + return Codegen.optional(this.comment); + } + /** + * Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + * + */ + @Export(name="connectionType", type=String.class, parameters={}) + private Output connectionType; + + /** + * @return Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + * + */ + public Output connectionType() { + return this.connectionType; + } + @Export(name="metastoreId", type=String.class, parameters={}) + private Output metastoreId; + + public Output metastoreId() { + return this.metastoreId; + } + /** + * Name of the Connection. 
+ * + */ + @Export(name="name", type=String.class, parameters={}) + private Output name; + + /** + * @return Name of the Connection. + * + */ + public Output name() { + return this.name; + } + /** + * The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + * + */ + @Export(name="options", type=Map.class, parameters={String.class, Object.class}) + private Output> options; + + /** + * @return The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + * + */ + public Output> options() { + return this.options; + } + /** + * Name of the connection owner. + * + */ + @Export(name="owner", type=String.class, parameters={}) + private Output owner; + + /** + * @return Name of the connection owner. + * + */ + public Output> owner() { + return Codegen.optional(this.owner); + } + /** + * Free-form connection properties. + * + */ + @Export(name="properties", type=Map.class, parameters={String.class, Object.class}) + private Output> properties; + + /** + * @return Free-form connection properties. + * + */ + public Output>> properties() { + return Codegen.optional(this.properties); + } + @Export(name="readOnly", type=Boolean.class, parameters={}) + private Output readOnly; + + public Output readOnly() { + return this.readOnly; + } + + /** + * + * @param name The _unique_ name of the resulting resource. + */ + public Connection(String name) { + this(name, ConnectionArgs.Empty); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + */ + public Connection(String name, ConnectionArgs args) { + this(name, args, null); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + * @param options A bag of options that control this resource's behavior. + */ + public Connection(String name, ConnectionArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("databricks:index/connection:Connection", name, args == null ? ConnectionArgs.Empty : args, makeResourceOptions(options, Codegen.empty())); + } + + private Connection(String name, Output id, @Nullable ConnectionState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("databricks:index/connection:Connection", name, state, makeResourceOptions(options, id)); + } + + private static com.pulumi.resources.CustomResourceOptions makeResourceOptions(@Nullable com.pulumi.resources.CustomResourceOptions options, @Nullable Output id) { + var defaultOptions = com.pulumi.resources.CustomResourceOptions.builder() + .version(Utilities.getVersion()) + .additionalSecretOutputs(List.of( + "options" + )) + .build(); + return com.pulumi.resources.CustomResourceOptions.merge(defaultOptions, options, id); + } + + /** + * Get an existing Host resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state + * @param options Optional settings to control the behavior of the CustomResource. 
+ */ + public static Connection get(String name, Output id, @Nullable ConnectionState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + return new Connection(name, id, state, options); + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/ConnectionArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/ConnectionArgs.java new file mode 100644 index 00000000..f6f3a07f --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/ConnectionArgs.java @@ -0,0 +1,307 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Boolean; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ConnectionArgs extends com.pulumi.resources.ResourceArgs { + + public static final ConnectionArgs Empty = new ConnectionArgs(); + + /** + * Free-form text. + * + */ + @Import(name="comment") + private @Nullable Output comment; + + /** + * @return Free-form text. + * + */ + public Optional> comment() { + return Optional.ofNullable(this.comment); + } + + /** + * Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + * + */ + @Import(name="connectionType", required=true) + private Output connectionType; + + /** + * @return Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + * + */ + public Output connectionType() { + return this.connectionType; + } + + @Import(name="metastoreId") + private @Nullable Output metastoreId; + + public Optional> metastoreId() { + return Optional.ofNullable(this.metastoreId); + } + + /** + * Name of the Connection. + * + */ + @Import(name="name") + private @Nullable Output name; + + /** + * @return Name of the Connection. + * + */ + public Optional> name() { + return Optional.ofNullable(this.name); + } + + /** + * The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + * + */ + @Import(name="options", required=true) + private Output> options; + + /** + * @return The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + * + */ + public Output> options() { + return this.options; + } + + /** + * Name of the connection owner. + * + */ + @Import(name="owner") + private @Nullable Output owner; + + /** + * @return Name of the connection owner. + * + */ + public Optional> owner() { + return Optional.ofNullable(this.owner); + } + + /** + * Free-form connection properties. + * + */ + @Import(name="properties") + private @Nullable Output> properties; + + /** + * @return Free-form connection properties. 
+ * + */ + public Optional>> properties() { + return Optional.ofNullable(this.properties); + } + + @Import(name="readOnly") + private @Nullable Output readOnly; + + public Optional> readOnly() { + return Optional.ofNullable(this.readOnly); + } + + private ConnectionArgs() {} + + private ConnectionArgs(ConnectionArgs $) { + this.comment = $.comment; + this.connectionType = $.connectionType; + this.metastoreId = $.metastoreId; + this.name = $.name; + this.options = $.options; + this.owner = $.owner; + this.properties = $.properties; + this.readOnly = $.readOnly; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ConnectionArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ConnectionArgs $; + + public Builder() { + $ = new ConnectionArgs(); + } + + public Builder(ConnectionArgs defaults) { + $ = new ConnectionArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param comment Free-form text. + * + * @return builder + * + */ + public Builder comment(@Nullable Output comment) { + $.comment = comment; + return this; + } + + /** + * @param comment Free-form text. + * + * @return builder + * + */ + public Builder comment(String comment) { + return comment(Output.of(comment)); + } + + /** + * @param connectionType Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + * + * @return builder + * + */ + public Builder connectionType(Output connectionType) { + $.connectionType = connectionType; + return this; + } + + /** + * @param connectionType Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + * + * @return builder + * + */ + public Builder connectionType(String connectionType) { + return connectionType(Output.of(connectionType)); + } + + public Builder metastoreId(@Nullable Output metastoreId) { + $.metastoreId = metastoreId; + return this; + } + + public Builder metastoreId(String metastoreId) { + return metastoreId(Output.of(metastoreId)); + } + + /** + * @param name Name of the Connection. + * + * @return builder + * + */ + public Builder name(@Nullable Output name) { + $.name = name; + return this; + } + + /** + * @param name Name of the Connection. + * + * @return builder + * + */ + public Builder name(String name) { + return name(Output.of(name)); + } + + /** + * @param options The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + * + * @return builder + * + */ + public Builder options(Output> options) { + $.options = options; + return this; + } + + /** + * @param options The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + * + * @return builder + * + */ + public Builder options(Map options) { + return options(Output.of(options)); + } + + /** + * @param owner Name of the connection owner. + * + * @return builder + * + */ + public Builder owner(@Nullable Output owner) { + $.owner = owner; + return this; + } + + /** + * @param owner Name of the connection owner. + * + * @return builder + * + */ + public Builder owner(String owner) { + return owner(Output.of(owner)); + } + + /** + * @param properties Free-form connection properties. 
+ * + * @return builder + * + */ + public Builder properties(@Nullable Output> properties) { + $.properties = properties; + return this; + } + + /** + * @param properties Free-form connection properties. + * + * @return builder + * + */ + public Builder properties(Map properties) { + return properties(Output.of(properties)); + } + + public Builder readOnly(@Nullable Output readOnly) { + $.readOnly = readOnly; + return this; + } + + public Builder readOnly(Boolean readOnly) { + return readOnly(Output.of(readOnly)); + } + + public ConnectionArgs build() { + $.connectionType = Objects.requireNonNull($.connectionType, "expected parameter 'connectionType' to be non-null"); + $.options = Objects.requireNonNull($.options, "expected parameter 'options' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java b/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java index 2eff2a0c..d4577050 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/DatabricksFunctions.java @@ -1921,6 +1921,7 @@ public static CompletableFuture getClustersPlain(GetClustersP * * `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`. * * `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`. * * `workspace_url` - URL of the current Databricks workspace. + * * `acl_principal_id` - identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal. * * ## Related Resources * @@ -1947,6 +1948,7 @@ public static Output getCurrentUser() { * * `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`. * * `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`. * * `workspace_url` - URL of the current Databricks workspace. + * * `acl_principal_id` - identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal. * * ## Related Resources * @@ -1973,6 +1975,7 @@ public static CompletableFuture getCurrentUserPlain() { * * `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`. * * `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`. * * `workspace_url` - URL of the current Databricks workspace. + * * `acl_principal_id` - identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal. * * ## Related Resources * @@ -1999,6 +2002,7 @@ public static Output getCurrentUser(InvokeArgs args) { * * `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`. * * `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`. * * `workspace_url` - URL of the current Databricks workspace. + * * `acl_principal_id` - identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal. 
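+ *
+ * For example, the `acl_principal_id` value can feed a grant rule of a `databricks.AccessControlRuleSet`. A minimal sketch, assuming the attribute is surfaced as `aclPrincipalId()` on the result; the rule set name, application ID, and `roles/servicePrincipal.user` role are illustrative:
+ * ```java
+ * package generated_program;
+ *
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.databricks.DatabricksFunctions;
+ * import com.pulumi.databricks.AccessControlRuleSet;
+ * import com.pulumi.databricks.AccessControlRuleSetArgs;
+ * import com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;
+ * import java.util.List;
+ *
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ *
+ *     public static void stack(Context ctx) {
+ *         final var accountId = "00000000-0000-0000-0000-000000000000";
+ *         // Illustrative service principal application ID.
+ *         final var spApplicationId = "11111111-1111-1111-1111-111111111111";
+ *
+ *         // Whoever runs the stack is granted use of the given service principal.
+ *         final var me = DatabricksFunctions.getCurrentUser();
+ *
+ *         var spRuleSet = new AccessControlRuleSet("spRuleSet", AccessControlRuleSetArgs.builder()
+ *             .name(String.format("accounts/%s/servicePrincipals/%s/ruleSets/default", accountId, spApplicationId))
+ *             .grantRules(AccessControlRuleSetGrantRuleArgs.builder()
+ *                 .principals(me.applyValue(u -> List.of(u.aclPrincipalId())))
+ *                 .role("roles/servicePrincipal.user")
+ *                 .build())
+ *             .build());
+ *     }
+ * }
+ * ```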
* * ## Related Resources * @@ -2025,6 +2029,7 @@ public static CompletableFuture getCurrentUserPlain(Invoke * * `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`. * * `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`. * * `workspace_url` - URL of the current Databricks workspace. + * * `acl_principal_id` - identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal. * * ## Related Resources * @@ -2051,6 +2056,7 @@ public static Output getCurrentUser(InvokeArgs args, Invok * * `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`. * * `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`. * * `workspace_url` - URL of the current Databricks workspace. + * * `acl_principal_id` - identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal. * * ## Related Resources * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocation.java b/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocation.java index ea767aed..f8e824c8 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocation.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocation.java @@ -10,6 +10,7 @@ import com.pulumi.databricks.ExternalLocationArgs; import com.pulumi.databricks.Utilities; import com.pulumi.databricks.inputs.ExternalLocationState; +import com.pulumi.databricks.outputs.ExternalLocationEncryptionDetails; import java.lang.Boolean; import java.lang.String; import java.util.Optional; @@ -32,6 +33,20 @@ */ @ResourceType(type="databricks:index/externalLocation:ExternalLocation") public class ExternalLocation extends com.pulumi.resources.CustomResource { + /** + * The ARN of the s3 access point to use with the external location (AWS). + * + */ + @Export(name="accessPoint", type=String.class, parameters={}) + private Output accessPoint; + + /** + * @return The ARN of the s3 access point to use with the external location (AWS). + * + */ + public Output> accessPoint() { + return Codegen.optional(this.accessPoint); + } /** * User-supplied free-form text. * @@ -47,19 +62,33 @@ public Output> comment() { return Codegen.optional(this.comment); } /** - * Name of the databricks.StorageCredential to use with this External Location. + * Name of the databricks.StorageCredential to use with this external location. * */ @Export(name="credentialName", type=String.class, parameters={}) private Output credentialName; /** - * @return Name of the databricks.StorageCredential to use with this External Location. + * @return Name of the databricks.StorageCredential to use with this external location. * */ public Output credentialName() { return this.credentialName; } + /** + * The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + * + */ + @Export(name="encryptionDetails", type=ExternalLocationEncryptionDetails.class, parameters={}) + private Output encryptionDetails; + + /** + * @return The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). 
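+ *
+ * For example, SSE-KMS details might be wired in as follows. This is a sketch: the URL, credential name, and KMS key ARN are illustrative placeholders, and the nested builder fields assume the provider's `sse_encryption_details` block (`algorithm`, `aws_kms_key_arn`):
+ * ```java
+ * package generated_program;
+ *
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.databricks.ExternalLocation;
+ * import com.pulumi.databricks.ExternalLocationArgs;
+ * import com.pulumi.databricks.inputs.ExternalLocationEncryptionDetailsArgs;
+ * import com.pulumi.databricks.inputs.ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs;
+ *
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ *
+ *     public static void stack(Context ctx) {
+ *         var secured = new ExternalLocation("secured", ExternalLocationArgs.builder()
+ *             .url("s3://my-bucket/some/path")
+ *             // Name of an existing databricks.StorageCredential.
+ *             .credentialName("my-storage-credential")
+ *             .encryptionDetails(ExternalLocationEncryptionDetailsArgs.builder()
+ *                 .sseEncryptionDetails(ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs.builder()
+ *                     .algorithm("AWS_SSE_KMS")
+ *                     .awsKmsKeyArn("arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000")
+ *                     .build())
+ *                 .build())
+ *             .build());
+ *     }
+ * }
+ * ```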
+ * + */ + public Output> encryptionDetails() { + return Codegen.optional(this.encryptionDetails); + } /** * Destroy external location regardless of its dependents. * @@ -74,6 +103,20 @@ public Output credentialName() { public Output> forceDestroy() { return Codegen.optional(this.forceDestroy); } + /** + * Update external location regardless of its dependents. + * + */ + @Export(name="forceUpdate", type=Boolean.class, parameters={}) + private Output forceUpdate; + + /** + * @return Update external location regardless of its dependents. + * + */ + public Output> forceUpdate() { + return Codegen.optional(this.forceUpdate); + } @Export(name="metastoreId", type=String.class, parameters={}) private Output metastoreId; @@ -95,14 +138,14 @@ public Output name() { return this.name; } /** - * Username/groupname/sp application_id of the external Location owner. + * Username/groupname/sp application_id of the external location owner. * */ @Export(name="owner", type=String.class, parameters={}) private Output owner; /** - * @return Username/groupname/sp application_id of the external Location owner. + * @return Username/groupname/sp application_id of the external location owner. * */ public Output owner() { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocationArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocationArgs.java index 7db2bb64..076543a4 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocationArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/ExternalLocationArgs.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import com.pulumi.databricks.inputs.ExternalLocationEncryptionDetailsArgs; import java.lang.Boolean; import java.lang.String; import java.util.Objects; @@ -16,6 +17,21 @@ public final class ExternalLocationArgs extends com.pulumi.resources.ResourceArg public static final ExternalLocationArgs Empty = new ExternalLocationArgs(); + /** + * The ARN of the s3 access point to use with the external location (AWS). + * + */ + @Import(name="accessPoint") + private @Nullable Output accessPoint; + + /** + * @return The ARN of the s3 access point to use with the external location (AWS). + * + */ + public Optional> accessPoint() { + return Optional.ofNullable(this.accessPoint); + } + /** * User-supplied free-form text. * @@ -32,20 +48,35 @@ public Optional> comment() { } /** - * Name of the databricks.StorageCredential to use with this External Location. + * Name of the databricks.StorageCredential to use with this external location. * */ @Import(name="credentialName", required=true) private Output credentialName; /** - * @return Name of the databricks.StorageCredential to use with this External Location. + * @return Name of the databricks.StorageCredential to use with this external location. * */ public Output credentialName() { return this.credentialName; } + /** + * The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + * + */ + @Import(name="encryptionDetails") + private @Nullable Output encryptionDetails; + + /** + * @return The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + * + */ + public Optional> encryptionDetails() { + return Optional.ofNullable(this.encryptionDetails); + } + /** * Destroy external location regardless of its dependents. 
* @@ -61,6 +92,21 @@ public Optional> forceDestroy() { return Optional.ofNullable(this.forceDestroy); } + /** + * Update external location regardless of its dependents. + * + */ + @Import(name="forceUpdate") + private @Nullable Output forceUpdate; + + /** + * @return Update external location regardless of its dependents. + * + */ + public Optional> forceUpdate() { + return Optional.ofNullable(this.forceUpdate); + } + @Import(name="metastoreId") private @Nullable Output metastoreId; @@ -84,14 +130,14 @@ public Optional> name() { } /** - * Username/groupname/sp application_id of the external Location owner. + * Username/groupname/sp application_id of the external location owner. * */ @Import(name="owner") private @Nullable Output owner; /** - * @return Username/groupname/sp application_id of the external Location owner. + * @return Username/groupname/sp application_id of the external location owner. * */ public Optional> owner() { @@ -146,9 +192,12 @@ public Output url() { private ExternalLocationArgs() {} private ExternalLocationArgs(ExternalLocationArgs $) { + this.accessPoint = $.accessPoint; this.comment = $.comment; this.credentialName = $.credentialName; + this.encryptionDetails = $.encryptionDetails; this.forceDestroy = $.forceDestroy; + this.forceUpdate = $.forceUpdate; this.metastoreId = $.metastoreId; this.name = $.name; this.owner = $.owner; @@ -175,6 +224,27 @@ public Builder(ExternalLocationArgs defaults) { $ = new ExternalLocationArgs(Objects.requireNonNull(defaults)); } + /** + * @param accessPoint The ARN of the s3 access point to use with the external location (AWS). + * + * @return builder + * + */ + public Builder accessPoint(@Nullable Output accessPoint) { + $.accessPoint = accessPoint; + return this; + } + + /** + * @param accessPoint The ARN of the s3 access point to use with the external location (AWS). + * + * @return builder + * + */ + public Builder accessPoint(String accessPoint) { + return accessPoint(Output.of(accessPoint)); + } + /** * @param comment User-supplied free-form text. * @@ -197,7 +267,7 @@ public Builder comment(String comment) { } /** - * @param credentialName Name of the databricks.StorageCredential to use with this External Location. + * @param credentialName Name of the databricks.StorageCredential to use with this external location. * * @return builder * @@ -208,7 +278,7 @@ public Builder credentialName(Output credentialName) { } /** - * @param credentialName Name of the databricks.StorageCredential to use with this External Location. + * @param credentialName Name of the databricks.StorageCredential to use with this external location. * * @return builder * @@ -217,6 +287,27 @@ public Builder credentialName(String credentialName) { return credentialName(Output.of(credentialName)); } + /** + * @param encryptionDetails The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + * + * @return builder + * + */ + public Builder encryptionDetails(@Nullable Output encryptionDetails) { + $.encryptionDetails = encryptionDetails; + return this; + } + + /** + * @param encryptionDetails The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + * + * @return builder + * + */ + public Builder encryptionDetails(ExternalLocationEncryptionDetailsArgs encryptionDetails) { + return encryptionDetails(Output.of(encryptionDetails)); + } + /** * @param forceDestroy Destroy external location regardless of its dependents. 
* @@ -238,6 +329,27 @@ public Builder forceDestroy(Boolean forceDestroy) { return forceDestroy(Output.of(forceDestroy)); } + /** + * @param forceUpdate Update external location regardless of its dependents. + * + * @return builder + * + */ + public Builder forceUpdate(@Nullable Output forceUpdate) { + $.forceUpdate = forceUpdate; + return this; + } + + /** + * @param forceUpdate Update external location regardless of its dependents. + * + * @return builder + * + */ + public Builder forceUpdate(Boolean forceUpdate) { + return forceUpdate(Output.of(forceUpdate)); + } + public Builder metastoreId(@Nullable Output metastoreId) { $.metastoreId = metastoreId; return this; @@ -269,7 +381,7 @@ public Builder name(String name) { } /** - * @param owner Username/groupname/sp application_id of the external Location owner. + * @param owner Username/groupname/sp application_id of the external location owner. * * @return builder * @@ -280,7 +392,7 @@ public Builder owner(@Nullable Output owner) { } /** - * @param owner Username/groupname/sp application_id of the external Location owner. + * @param owner Username/groupname/sp application_id of the external location owner. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Grants.java b/sdk/java/src/main/java/com/pulumi/databricks/Grants.java index 5fd774b0..b893f062 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Grants.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Grants.java @@ -30,6 +30,12 @@ public Output> catalog() { public Output> externalLocation() { return Codegen.optional(this.externalLocation); } + @Export(name="foreignConnection", type=String.class, parameters={}) + private Output foreignConnection; + + public Output> foreignConnection() { + return Codegen.optional(this.foreignConnection); + } @Export(name="function", type=String.class, parameters={}) private Output function; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/GrantsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/GrantsArgs.java index 255a950e..45fcf122 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/GrantsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/GrantsArgs.java @@ -31,6 +31,13 @@ public Optional> externalLocation() { return Optional.ofNullable(this.externalLocation); } + @Import(name="foreignConnection") + private @Nullable Output foreignConnection; + + public Optional> foreignConnection() { + return Optional.ofNullable(this.foreignConnection); + } + @Import(name="function") private @Nullable Output function; @@ -106,6 +113,7 @@ private GrantsArgs() {} private GrantsArgs(GrantsArgs $) { this.catalog = $.catalog; this.externalLocation = $.externalLocation; + this.foreignConnection = $.foreignConnection; this.function = $.function; this.grants = $.grants; this.materializedView = $.materializedView; @@ -154,6 +162,15 @@ public Builder externalLocation(String externalLocation) { return externalLocation(Output.of(externalLocation)); } + public Builder foreignConnection(@Nullable Output foreignConnection) { + $.foreignConnection = foreignConnection; + return this; + } + + public Builder foreignConnection(String foreignConnection) { + return foreignConnection(Output.of(foreignConnection)); + } + public Builder function(@Nullable Output function) { $.function = function; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Metastore.java b/sdk/java/src/main/java/com/pulumi/databricks/Metastore.java index 15540c9a..3d0bd3ed 100644 --- 
a/sdk/java/src/main/java/com/pulumi/databricks/Metastore.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Metastore.java @@ -17,9 +17,6 @@ import javax.annotation.Nullable; /** - * > **Notes** - * Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. - * * A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. * * Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). @@ -121,6 +118,12 @@ public Output> forceDestroy() { public Output globalMetastoreId() { return this.globalMetastoreId; } + @Export(name="metastoreId", type=String.class, parameters={}) + private Output metastoreId; + + public Output metastoreId() { + return this.metastoreId; + } /** * Name of metastore. * @@ -149,9 +152,17 @@ public Output name() { public Output owner() { return this.owner; } + /** + * The region of the metastore + * + */ @Export(name="region", type=String.class, parameters={}) private Output region; + /** + * @return The region of the metastore + * + */ public Output region() { return this.region; } @@ -169,6 +180,12 @@ public Output region() { public Output storageRoot() { return this.storageRoot; } + @Export(name="storageRootCredentialId", type=String.class, parameters={}) + private Output storageRootCredentialId; + + public Output> storageRootCredentialId() { + return Codegen.optional(this.storageRootCredentialId); + } @Export(name="updatedAt", type=Integer.class, parameters={}) private Output updatedAt; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreArgs.java index 2b18e59d..045a94fa 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreArgs.java @@ -112,6 +112,13 @@ public Optional> globalMetastoreId() { return Optional.ofNullable(this.globalMetastoreId); } + @Import(name="metastoreId") + private @Nullable Output metastoreId; + + public Optional> metastoreId() { + return Optional.ofNullable(this.metastoreId); + } + /** * Name of metastore. 
* @@ -142,9 +149,17 @@ public Optional> owner() { return Optional.ofNullable(this.owner); } + /** + * The region of the metastore + * + */ @Import(name="region") private @Nullable Output region; + /** + * @return The region of the metastore + * + */ public Optional> region() { return Optional.ofNullable(this.region); } @@ -164,6 +179,13 @@ public Output storageRoot() { return this.storageRoot; } + @Import(name="storageRootCredentialId") + private @Nullable Output storageRootCredentialId; + + public Optional> storageRootCredentialId() { + return Optional.ofNullable(this.storageRootCredentialId); + } + @Import(name="updatedAt") private @Nullable Output updatedAt; @@ -190,10 +212,12 @@ private MetastoreArgs(MetastoreArgs $) { this.deltaSharingScope = $.deltaSharingScope; this.forceDestroy = $.forceDestroy; this.globalMetastoreId = $.globalMetastoreId; + this.metastoreId = $.metastoreId; this.name = $.name; this.owner = $.owner; this.region = $.region; this.storageRoot = $.storageRoot; + this.storageRootCredentialId = $.storageRootCredentialId; this.updatedAt = $.updatedAt; this.updatedBy = $.updatedBy; } @@ -345,6 +369,15 @@ public Builder globalMetastoreId(String globalMetastoreId) { return globalMetastoreId(Output.of(globalMetastoreId)); } + public Builder metastoreId(@Nullable Output metastoreId) { + $.metastoreId = metastoreId; + return this; + } + + public Builder metastoreId(String metastoreId) { + return metastoreId(Output.of(metastoreId)); + } + /** * @param name Name of metastore. * @@ -387,11 +420,23 @@ public Builder owner(String owner) { return owner(Output.of(owner)); } + /** + * @param region The region of the metastore + * + * @return builder + * + */ public Builder region(@Nullable Output region) { $.region = region; return this; } + /** + * @param region The region of the metastore + * + * @return builder + * + */ public Builder region(String region) { return region(Output.of(region)); } @@ -417,6 +462,15 @@ public Builder storageRoot(String storageRoot) { return storageRoot(Output.of(storageRoot)); } + public Builder storageRootCredentialId(@Nullable Output storageRootCredentialId) { + $.storageRootCredentialId = storageRootCredentialId; + return this; + } + + public Builder storageRootCredentialId(String storageRootCredentialId) { + return storageRootCredentialId(Output.of(storageRootCredentialId)); + } + public Builder updatedAt(@Nullable Output updatedAt) { $.updatedAt = updatedAt; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreAssignment.java b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreAssignment.java index 2f0bcd20..d8690fc1 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MetastoreAssignment.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MetastoreAssignment.java @@ -45,6 +45,7 @@ * var thisMetastore = new Metastore("thisMetastore", MetastoreArgs.builder() * .storageRoot(String.format("s3://%s/metastore", aws_s3_bucket.metastore().id())) * .owner("uc admins") + * .region("us-east-1") * .forceDestroy(true) * .build()); * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MlflowModel.java b/sdk/java/src/main/java/com/pulumi/databricks/MlflowModel.java index e2c31e73..4b6ee19c 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MlflowModel.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MlflowModel.java @@ -87,10 +87,10 @@ @ResourceType(type="databricks:index/mlflowModel:MlflowModel") public class MlflowModel extends com.pulumi.resources.CustomResource { @Export(name="creationTimestamp", 
type=Integer.class, parameters={}) - private Output creationTimestamp; + private Output creationTimestamp; - public Output creationTimestamp() { - return this.creationTimestamp; + public Output> creationTimestamp() { + return Codegen.optional(this.creationTimestamp); } /** * The description of the MLflow model. @@ -107,10 +107,10 @@ public Output> description() { return Codegen.optional(this.description); } @Export(name="lastUpdatedTimestamp", type=Integer.class, parameters={}) - private Output lastUpdatedTimestamp; + private Output lastUpdatedTimestamp; - public Output lastUpdatedTimestamp() { - return this.lastUpdatedTimestamp; + public Output> lastUpdatedTimestamp() { + return Codegen.optional(this.lastUpdatedTimestamp); } /** * Name of MLflow model. Change of name triggers new resource. @@ -126,12 +126,6 @@ public Output lastUpdatedTimestamp() { public Output name() { return this.name; } - @Export(name="registeredModelId", type=String.class, parameters={}) - private Output registeredModelId; - - public Output registeredModelId() { - return this.registeredModelId; - } /** * Tags for the MLflow model. * @@ -147,10 +141,10 @@ public Output>> tags() { return Codegen.optional(this.tags); } @Export(name="userId", type=String.class, parameters={}) - private Output userId; + private Output userId; - public Output userId() { - return this.userId; + public Output> userId() { + return Codegen.optional(this.userId); } /** diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MlflowModelArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/MlflowModelArgs.java index f8d26dd6..6a5dc698 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MlflowModelArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MlflowModelArgs.java @@ -62,13 +62,6 @@ public Optional> name() { return Optional.ofNullable(this.name); } - @Import(name="registeredModelId") - private @Nullable Output registeredModelId; - - public Optional> registeredModelId() { - return Optional.ofNullable(this.registeredModelId); - } - /** * Tags for the MLflow model. * @@ -98,7 +91,6 @@ private MlflowModelArgs(MlflowModelArgs $) { this.description = $.description; this.lastUpdatedTimestamp = $.lastUpdatedTimestamp; this.name = $.name; - this.registeredModelId = $.registeredModelId; this.tags = $.tags; this.userId = $.userId; } @@ -181,15 +173,6 @@ public Builder name(String name) { return name(Output.of(name)); } - public Builder registeredModelId(@Nullable Output registeredModelId) { - $.registeredModelId = registeredModelId; - return this; - } - - public Builder registeredModelId(String registeredModelId) { - return registeredModelId(Output.of(registeredModelId)); - } - /** * @param tags Tags for the MLflow model. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeys.java b/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeys.java index 9fdd81fe..b898f562 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeys.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeys.java @@ -22,9 +22,11 @@ * ## Example Usage * * > **Note** If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour. 
+ * * ### Customer-managed key for managed services * * You must configure this during workspace creation + * ### For AWS * ```java * package generated_program; * @@ -105,7 +107,45 @@ * } * } * ``` + * ### For GCP + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.MwsCustomerManagedKeys; + * import com.pulumi.databricks.MwsCustomerManagedKeysArgs; + * import com.pulumi.databricks.inputs.MwsCustomerManagedKeysGcpKeyInfoArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var config = ctx.config(); + * final var databricksAccountId = config.get("databricksAccountId"); + * final var cmekResourceId = config.get("cmekResourceId"); + * var managedServices = new MwsCustomerManagedKeys("managedServices", MwsCustomerManagedKeysArgs.builder() + * .accountId(databricksAccountId) + * .gcpKeyInfo(MwsCustomerManagedKeysGcpKeyInfoArgs.builder() + * .kmsKeyId(cmekResourceId) + * .build()) + * .useCases("MANAGED_SERVICES") + * .build()); + * + * } + * } + * ``` * ### Customer-managed key for workspace storage + * ### For AWS * ```java * package generated_program; * @@ -224,6 +264,43 @@ * } * } * ``` + * ### For GCP + * ```java + * package generated_program; + * + * import com.pulumi.Context; + * import com.pulumi.Pulumi; + * import com.pulumi.core.Output; + * import com.pulumi.databricks.MwsCustomerManagedKeys; + * import com.pulumi.databricks.MwsCustomerManagedKeysArgs; + * import com.pulumi.databricks.inputs.MwsCustomerManagedKeysGcpKeyInfoArgs; + * import java.util.List; + * import java.util.ArrayList; + * import java.util.Map; + * import java.io.File; + * import java.nio.file.Files; + * import java.nio.file.Paths; + * + * public class App { + * public static void main(String[] args) { + * Pulumi.run(App::stack); + * } + * + * public static void stack(Context ctx) { + * final var config = ctx.config(); + * final var databricksAccountId = config.get("databricksAccountId"); + * final var cmekResourceId = config.get("cmekResourceId"); + * var storage = new MwsCustomerManagedKeys("storage", MwsCustomerManagedKeysArgs.builder() + * .accountId(databricksAccountId) + * .gcpKeyInfo(MwsCustomerManagedKeysGcpKeyInfoArgs.builder() + * .kmsKeyId(cmekResourceId) + * .build()) + * .useCases("STORAGE") + * .build()); + * + * } + * } + * ``` * ## Related Resources * * The following resources are used in the same context: @@ -257,14 +334,14 @@ public Output accountId() { return this.accountId; } /** - * This field is a block and is documented below. + * This field is a block and is documented below. This conflicts with `gcp_key_info` * */ @Export(name="awsKeyInfo", type=MwsCustomerManagedKeysAwsKeyInfo.class, parameters={}) private Output awsKeyInfo; /** - * @return This field is a block and is documented below. + * @return This field is a block and is documented below. This conflicts with `gcp_key_info` * */ public Output> awsKeyInfo() { @@ -298,9 +375,17 @@ public Output creationTime() { public Output customerManagedKeyId() { return this.customerManagedKeyId; } + /** + * This field is a block and is documented below. 
This conflicts with `aws_key_info` + * + */ @Export(name="gcpKeyInfo", type=MwsCustomerManagedKeysGcpKeyInfo.class, parameters={}) private Output gcpKeyInfo; + /** + * @return This field is a block and is documented below. This conflicts with `aws_key_info` + * + */ public Output> gcpKeyInfo() { return Codegen.optional(this.gcpKeyInfo); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeysArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeysArgs.java index af471c62..5d864828 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeysArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MwsCustomerManagedKeysArgs.java @@ -35,14 +35,14 @@ public Output accountId() { } /** - * This field is a block and is documented below. + * This field is a block and is documented below. This conflicts with `gcp_key_info` * */ @Import(name="awsKeyInfo") private @Nullable Output awsKeyInfo; /** - * @return This field is a block and is documented below. + * @return This field is a block and is documented below. This conflicts with `gcp_key_info` * */ public Optional> awsKeyInfo() { @@ -79,9 +79,17 @@ public Optional> customerManagedKeyId() { return Optional.ofNullable(this.customerManagedKeyId); } + /** + * This field is a block and is documented below. This conflicts with `aws_key_info` + * + */ @Import(name="gcpKeyInfo") private @Nullable Output gcpKeyInfo; + /** + * @return This field is a block and is documented below. This conflicts with `aws_key_info` + * + */ public Optional> gcpKeyInfo() { return Optional.ofNullable(this.gcpKeyInfo); } @@ -152,7 +160,7 @@ public Builder accountId(String accountId) { } /** - * @param awsKeyInfo This field is a block and is documented below. + * @param awsKeyInfo This field is a block and is documented below. This conflicts with `gcp_key_info` * * @return builder * @@ -163,7 +171,7 @@ public Builder awsKeyInfo(@Nullable Output } /** - * @param awsKeyInfo This field is a block and is documented below. + * @param awsKeyInfo This field is a block and is documented below. This conflicts with `gcp_key_info` * * @return builder * @@ -214,11 +222,23 @@ public Builder customerManagedKeyId(String customerManagedKeyId) { return customerManagedKeyId(Output.of(customerManagedKeyId)); } + /** + * @param gcpKeyInfo This field is a block and is documented below. This conflicts with `aws_key_info` + * + * @return builder + * + */ public Builder gcpKeyInfo(@Nullable Output gcpKeyInfo) { $.gcpKeyInfo = gcpKeyInfo; return this; } + /** + * @param gcpKeyInfo This field is a block and is documented below. This conflicts with `aws_key_info` + * + * @return builder + * + */ public Builder gcpKeyInfo(MwsCustomerManagedKeysGcpKeyInfoArgs gcpKeyInfo) { return gcpKeyInfo(Output.of(gcpKeyInfo)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MwsWorkspaces.java b/sdk/java/src/main/java/com/pulumi/databricks/MwsWorkspaces.java index bf2feb19..1f9ab2f2 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MwsWorkspaces.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MwsWorkspaces.java @@ -232,9 +232,17 @@ public Output> privateAccessSettingsId() { public Output> storageConfigurationId() { return Codegen.optional(this.storageConfigurationId); } + /** + * `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. 
+ * + */ @Export(name="storageCustomerManagedKeyId", type=String.class, parameters={}) private Output storageCustomerManagedKeyId; + /** + * @return `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + * + */ public Output> storageCustomerManagedKeyId() { return Codegen.optional(this.storageCustomerManagedKeyId); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/MwsWorkspacesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/MwsWorkspacesArgs.java index 3ae3d6c0..a183b259 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/MwsWorkspacesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/MwsWorkspacesArgs.java @@ -248,9 +248,17 @@ public Optional> storageConfigurationId() { return Optional.ofNullable(this.storageConfigurationId); } + /** + * `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + * + */ @Import(name="storageCustomerManagedKeyId") private @Nullable Output storageCustomerManagedKeyId; + /** + * @return `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + * + */ public Optional> storageCustomerManagedKeyId() { return Optional.ofNullable(this.storageCustomerManagedKeyId); } @@ -695,11 +703,23 @@ public Builder storageConfigurationId(String storageConfigurationId) { return storageConfigurationId(Output.of(storageConfigurationId)); } + /** + * @param storageCustomerManagedKeyId `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + * + * @return builder + * + */ public Builder storageCustomerManagedKeyId(@Nullable Output storageCustomerManagedKeyId) { $.storageCustomerManagedKeyId = storageCustomerManagedKeyId; return this; } + /** + * @param storageCustomerManagedKeyId `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + * + * @return builder + * + */ public Builder storageCustomerManagedKeyId(String storageCustomerManagedKeyId) { return storageCustomerManagedKeyId(Output.of(storageCustomerManagedKeyId)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Share.java b/sdk/java/src/main/java/com/pulumi/databricks/Share.java index 35250411..55e72343 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Share.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Share.java @@ -67,6 +67,20 @@ public Output name() { public Output>> objects() { return Codegen.optional(this.objects); } + /** + * User name/group name/sp application_id of the share owner. + * + */ + @Export(name="owner", type=String.class, parameters={}) + private Output owner; + + /** + * @return User name/group name/sp application_id of the share owner. + * + */ + public Output> owner() { + return Codegen.optional(this.owner); + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/ShareArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/ShareArgs.java index 61ab5e39..7636ece4 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/ShareArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/ShareArgs.java @@ -70,6 +70,21 @@ public Optional>> objects() { return Optional.ofNullable(this.objects); } + /** + * User name/group name/sp application_id of the share owner. 
+ * + */ + @Import(name="owner") + private @Nullable Output owner; + + /** + * @return User name/group name/sp application_id of the share owner. + * + */ + public Optional> owner() { + return Optional.ofNullable(this.owner); + } + private ShareArgs() {} private ShareArgs(ShareArgs $) { @@ -77,6 +92,7 @@ private ShareArgs(ShareArgs $) { this.createdBy = $.createdBy; this.name = $.name; this.objects = $.objects; + this.owner = $.owner; } public static Builder builder() { @@ -173,6 +189,27 @@ public Builder objects(ShareObjectArgs... objects) { return objects(List.of(objects)); } + /** + * @param owner User name/group name/sp application_id of the share owner. + * + * @return builder + * + */ + public Builder owner(@Nullable Output owner) { + $.owner = owner; + return this; + } + + /** + * @param owner User name/group name/sp application_id of the share owner. + * + * @return builder + * + */ + public Builder owner(String owner) { + return owner(Output.of(owner)); + } + public ShareArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/SqlAlert.java b/sdk/java/src/main/java/com/pulumi/databricks/SqlAlert.java index 10add4e0..abf0fa70 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/SqlAlert.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/SqlAlert.java @@ -33,6 +33,12 @@ */ @ResourceType(type="databricks:index/sqlAlert:SqlAlert") public class SqlAlert extends com.pulumi.resources.CustomResource { + @Export(name="createdAt", type=String.class, parameters={}) + private Output createdAt; + + public Output createdAt() { + return this.createdAt; + } /** * Name of the alert. * @@ -103,6 +109,12 @@ public Output queryId() { public Output> rearm() { return Codegen.optional(this.rearm); } + @Export(name="updatedAt", type=String.class, parameters={}) + private Output updatedAt; + + public Output updatedAt() { + return this.updatedAt; + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/SqlAlertArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/SqlAlertArgs.java index c83e3f65..45784858 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/SqlAlertArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/SqlAlertArgs.java @@ -17,6 +17,13 @@ public final class SqlAlertArgs extends com.pulumi.resources.ResourceArgs { public static final SqlAlertArgs Empty = new SqlAlertArgs(); + @Import(name="createdAt") + private @Nullable Output createdAt; + + public Optional> createdAt() { + return Optional.ofNullable(this.createdAt); + } + /** * Name of the alert. * @@ -92,14 +99,23 @@ public Optional> rearm() { return Optional.ofNullable(this.rearm); } + @Import(name="updatedAt") + private @Nullable Output updatedAt; + + public Optional> updatedAt() { + return Optional.ofNullable(this.updatedAt); + } + private SqlAlertArgs() {} private SqlAlertArgs(SqlAlertArgs $) { + this.createdAt = $.createdAt; this.name = $.name; this.options = $.options; this.parent = $.parent; this.queryId = $.queryId; this.rearm = $.rearm; + this.updatedAt = $.updatedAt; } public static Builder builder() { @@ -120,6 +136,15 @@ public Builder(SqlAlertArgs defaults) { $ = new SqlAlertArgs(Objects.requireNonNull(defaults)); } + public Builder createdAt(@Nullable Output createdAt) { + $.createdAt = createdAt; + return this; + } + + public Builder createdAt(String createdAt) { + return createdAt(Output.of(createdAt)); + } + /** * @param name Name of the alert. 
* @@ -225,6 +250,15 @@ public Builder rearm(Integer rearm) { return rearm(Output.of(rearm)); } + public Builder updatedAt(@Nullable Output updatedAt) { + $.updatedAt = updatedAt; + return this; + } + + public Builder updatedAt(String updatedAt) { + return updatedAt(Output.of(updatedAt)); + } + public SqlAlertArgs build() { $.options = Objects.requireNonNull($.options, "expected parameter 'options' to be non-null"); $.queryId = Objects.requireNonNull($.queryId, "expected parameter 'queryId' to be non-null"); diff --git a/sdk/java/src/main/java/com/pulumi/databricks/SqlDashboard.java b/sdk/java/src/main/java/com/pulumi/databricks/SqlDashboard.java index 6b2cf6ad..26e3116d 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/SqlDashboard.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/SqlDashboard.java @@ -115,6 +115,12 @@ */ @ResourceType(type="databricks:index/sqlDashboard:SqlDashboard") public class SqlDashboard extends com.pulumi.resources.CustomResource { + @Export(name="createdAt", type=String.class, parameters={}) + private Output createdAt; + + public Output createdAt() { + return this.createdAt; + } @Export(name="name", type=String.class, parameters={}) private Output name; @@ -133,6 +139,12 @@ public Output> parent() { public Output>> tags() { return Codegen.optional(this.tags); } + @Export(name="updatedAt", type=String.class, parameters={}) + private Output updatedAt; + + public Output updatedAt() { + return this.updatedAt; + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/SqlDashboardArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/SqlDashboardArgs.java index cdaf19f7..c244a080 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/SqlDashboardArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/SqlDashboardArgs.java @@ -16,6 +16,13 @@ public final class SqlDashboardArgs extends com.pulumi.resources.ResourceArgs { public static final SqlDashboardArgs Empty = new SqlDashboardArgs(); + @Import(name="createdAt") + private @Nullable Output createdAt; + + public Optional> createdAt() { + return Optional.ofNullable(this.createdAt); + } + @Import(name="name") private @Nullable Output name; @@ -37,12 +44,21 @@ public Optional>> tags() { return Optional.ofNullable(this.tags); } + @Import(name="updatedAt") + private @Nullable Output updatedAt; + + public Optional> updatedAt() { + return Optional.ofNullable(this.updatedAt); + } + private SqlDashboardArgs() {} private SqlDashboardArgs(SqlDashboardArgs $) { + this.createdAt = $.createdAt; this.name = $.name; this.parent = $.parent; this.tags = $.tags; + this.updatedAt = $.updatedAt; } public static Builder builder() { @@ -63,6 +79,15 @@ public Builder(SqlDashboardArgs defaults) { $ = new SqlDashboardArgs(Objects.requireNonNull(defaults)); } + public Builder createdAt(@Nullable Output createdAt) { + $.createdAt = createdAt; + return this; + } + + public Builder createdAt(String createdAt) { + return createdAt(Output.of(createdAt)); + } + public Builder name(@Nullable Output name) { $.name = name; return this; @@ -94,6 +119,15 @@ public Builder tags(String... 
tags) { return tags(List.of(tags)); } + public Builder updatedAt(@Nullable Output updatedAt) { + $.updatedAt = updatedAt; + return this; + } + + public Builder updatedAt(String updatedAt) { + return updatedAt(Output.of(updatedAt)); + } + public SqlDashboardArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/SqlQuery.java b/sdk/java/src/main/java/com/pulumi/databricks/SqlQuery.java index 9a5f107e..c107825b 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/SqlQuery.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/SqlQuery.java @@ -174,6 +174,12 @@ */ @ResourceType(type="databricks:index/sqlQuery:SqlQuery") public class SqlQuery extends com.pulumi.resources.CustomResource { + @Export(name="createdAt", type=String.class, parameters={}) + private Output createdAt; + + public Output createdAt() { + return this.createdAt; + } @Export(name="dataSourceId", type=String.class, parameters={}) private Output dataSourceId; @@ -234,6 +240,12 @@ public Output> schedule() { public Output>> tags() { return Codegen.optional(this.tags); } + @Export(name="updatedAt", type=String.class, parameters={}) + private Output updatedAt; + + public Output updatedAt() { + return this.updatedAt; + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/SqlQueryArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/SqlQueryArgs.java index 16689aea..c9c7191e 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/SqlQueryArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/SqlQueryArgs.java @@ -18,6 +18,13 @@ public final class SqlQueryArgs extends com.pulumi.resources.ResourceArgs { public static final SqlQueryArgs Empty = new SqlQueryArgs(); + @Import(name="createdAt") + private @Nullable Output createdAt; + + public Optional> createdAt() { + return Optional.ofNullable(this.createdAt); + } + @Import(name="dataSourceId", required=true) private Output dataSourceId; @@ -93,9 +100,17 @@ public Optional>> tags() { return Optional.ofNullable(this.tags); } + @Import(name="updatedAt") + private @Nullable Output updatedAt; + + public Optional> updatedAt() { + return Optional.ofNullable(this.updatedAt); + } + private SqlQueryArgs() {} private SqlQueryArgs(SqlQueryArgs $) { + this.createdAt = $.createdAt; this.dataSourceId = $.dataSourceId; this.description = $.description; this.name = $.name; @@ -105,6 +120,7 @@ private SqlQueryArgs(SqlQueryArgs $) { this.runAsRole = $.runAsRole; this.schedule = $.schedule; this.tags = $.tags; + this.updatedAt = $.updatedAt; } public static Builder builder() { @@ -125,6 +141,15 @@ public Builder(SqlQueryArgs defaults) { $ = new SqlQueryArgs(Objects.requireNonNull(defaults)); } + public Builder createdAt(@Nullable Output createdAt) { + $.createdAt = createdAt; + return this; + } + + public Builder createdAt(String createdAt) { + return createdAt(Output.of(createdAt)); + } + public Builder dataSourceId(Output dataSourceId) { $.dataSourceId = dataSourceId; return this; @@ -230,6 +255,15 @@ public Builder tags(String... 
tags) { return tags(List.of(tags)); } + public Builder updatedAt(@Nullable Output updatedAt) { + $.updatedAt = updatedAt; + return this; + } + + public Builder updatedAt(String updatedAt) { + return updatedAt(Output.of(updatedAt)); + } + public SqlQueryArgs build() { $.dataSourceId = Objects.requireNonNull($.dataSourceId, "expected parameter 'dataSourceId' to be non-null"); $.query = Objects.requireNonNull($.query, "expected parameter 'query' to be non-null"); diff --git a/sdk/java/src/main/java/com/pulumi/databricks/StorageCredential.java b/sdk/java/src/main/java/com/pulumi/databricks/StorageCredential.java index 90a1c0ab..22febe2a 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/StorageCredential.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/StorageCredential.java @@ -201,6 +201,12 @@ public Output> comment() { public Output databricksGcpServiceAccount() { return this.databricksGcpServiceAccount; } + @Export(name="forceDestroy", type=Boolean.class, parameters={}) + private Output forceDestroy; + + public Output> forceDestroy() { + return Codegen.optional(this.forceDestroy); + } @Export(name="gcpServiceAccountKey", type=StorageCredentialGcpServiceAccountKey.class, parameters={}) private Output gcpServiceAccountKey; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/StorageCredentialArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/StorageCredentialArgs.java index 3204e6e2..6cca74ba 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/StorageCredentialArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/StorageCredentialArgs.java @@ -56,6 +56,13 @@ public Optional> databr return Optional.ofNullable(this.databricksGcpServiceAccount); } + @Import(name="forceDestroy") + private @Nullable Output forceDestroy; + + public Optional> forceDestroy() { + return Optional.ofNullable(this.forceDestroy); + } + @Import(name="gcpServiceAccountKey") private @Nullable Output gcpServiceAccountKey; @@ -127,6 +134,7 @@ private StorageCredentialArgs(StorageCredentialArgs $) { this.azureServicePrincipal = $.azureServicePrincipal; this.comment = $.comment; this.databricksGcpServiceAccount = $.databricksGcpServiceAccount; + this.forceDestroy = $.forceDestroy; this.gcpServiceAccountKey = $.gcpServiceAccountKey; this.metastoreId = $.metastoreId; this.name = $.name; @@ -197,6 +205,15 @@ public Builder databricksGcpServiceAccount(StorageCredentialDatabricksGcpService return databricksGcpServiceAccount(Output.of(databricksGcpServiceAccount)); } + public Builder forceDestroy(@Nullable Output forceDestroy) { + $.forceDestroy = forceDestroy; + return this; + } + + public Builder forceDestroy(Boolean forceDestroy) { + return forceDestroy(Output.of(forceDestroy)); + } + public Builder gcpServiceAccountKey(@Nullable Output gcpServiceAccountKey) { $.gcpServiceAccountKey = gcpServiceAccountKey; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/AccessControlRuleSetGrantRuleArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/AccessControlRuleSetGrantRuleArgs.java index bf169195..8ebd56e0 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/AccessControlRuleSetGrantRuleArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/AccessControlRuleSetGrantRuleArgs.java @@ -38,18 +38,20 @@ public Optional>> principals() { } /** - * Role to be granted. The supported roles are listed below. 
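The new `forceDestroy` flag on `StorageCredential` shown above lets the credential be removed even while dependents still reference it, mirroring the flag already available on external locations. A hedged sketch using an AWS IAM role credential; the role ARN is a placeholder.

```java
import com.pulumi.Pulumi;
import com.pulumi.databricks.StorageCredential;
import com.pulumi.databricks.StorageCredentialArgs;
import com.pulumi.databricks.inputs.StorageCredentialAwsIamRoleArgs;

public class StorageCredentialSketch {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var credential = new StorageCredential("external", StorageCredentialArgs.builder()
                .name("external-access")
                .awsIamRole(StorageCredentialAwsIamRoleArgs.builder()
                    .roleArn("arn:aws:iam::123456789012:role/unity-catalog-access") // placeholder ARN
                    .build())
                .forceDestroy(true) // allow deletion even if external locations still use it
                .build());
        });
    }
}
```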
For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + * Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). * * `roles/servicePrincipal.manager` - Manager of a service principal. * * `roles/servicePrincipal.user` - User of a service principal. + * * `roles/group.manager` - Manager of a group. * */ @Import(name="role", required=true) private Output role; /** - * @return Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + * @return Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). * * `roles/servicePrincipal.manager` - Manager of a service principal. * * `roles/servicePrincipal.user` - User of a service principal. + * * `roles/group.manager` - Manager of a group. * */ public Output role() { @@ -122,9 +124,10 @@ public Builder principals(String... principals) { } /** - * @param role Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + * @param role Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). * * `roles/servicePrincipal.manager` - Manager of a service principal. * * `roles/servicePrincipal.user` - User of a service principal. + * * `roles/group.manager` - Manager of a group. * * @return builder * @@ -135,9 +138,10 @@ public Builder role(Output role) { } /** - * @param role Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + * @param role Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). 
* * `roles/servicePrincipal.manager` - Manager of a service principal. * * `roles/servicePrincipal.user` - User of a service principal. + * * `roles/group.manager` - Manager of a group. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/AccessControlRuleSetState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/AccessControlRuleSetState.java index 23eb190a..dec5a623 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/AccessControlRuleSetState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/AccessControlRuleSetState.java @@ -46,6 +46,8 @@ public Optional>> grantRules() { /** * Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` * */ @Import(name="name") @@ -54,6 +56,8 @@ public Optional>> grantRules() { /** * @return Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` * */ public Optional> name() { @@ -135,6 +139,8 @@ public Builder grantRules(AccessControlRuleSetGrantRuleArgs... grantRules) { /** * @param name Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` * * @return builder * @@ -147,6 +153,8 @@ public Builder name(@Nullable Output name) { /** * @param name Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/CatalogState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/CatalogState.java index fd884268..c5b540f5 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/CatalogState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/CatalogState.java @@ -33,6 +33,21 @@ public Optional> comment() { return Optional.ofNullable(this.comment); } + /** + * For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + * + */ + @Import(name="connectionName") + private @Nullable Output connectionName; + + /** + * @return For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. 
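The rule-set name formats and the `roles/group.manager` role documented above combine as in the following hedged sketch; the account ID, group ID, and grantee are placeholders, and the builder names are taken from the generated classes in this patch.

```java
import com.pulumi.Pulumi;
import com.pulumi.databricks.AccessControlRuleSet;
import com.pulumi.databricks.AccessControlRuleSetArgs;
import com.pulumi.databricks.inputs.AccessControlRuleSetGrantRuleArgs;

public class GroupRuleSetSketch {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var ruleSet = new AccessControlRuleSet("groupManagers", AccessControlRuleSetArgs.builder()
                // Account-level group rule set (placeholder account and group IDs).
                .name("accounts/00000000-0000-0000-0000-000000000000/groups/1234567890/ruleSets/default")
                .grantRules(AccessControlRuleSetGrantRuleArgs.builder()
                    .principals("users/someone@example.com") // acl_principal_id of the grantee
                    .role("roles/group.manager")             // newly documented group role
                    .build())
                .build());
        });
    }
}
```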
+ * + */ + public Optional> connectionName() { + return Optional.ofNullable(this.connectionName); + } + /** * Delete catalog regardless of its contents. * @@ -164,6 +179,7 @@ private CatalogState() {} private CatalogState(CatalogState $) { this.comment = $.comment; + this.connectionName = $.connectionName; this.forceDestroy = $.forceDestroy; this.isolationMode = $.isolationMode; this.metastoreId = $.metastoreId; @@ -214,6 +230,27 @@ public Builder comment(String comment) { return comment(Output.of(comment)); } + /** + * @param connectionName For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + * + * @return builder + * + */ + public Builder connectionName(@Nullable Output connectionName) { + $.connectionName = connectionName; + return this; + } + + /** + * @param connectionName For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + * + * @return builder + * + */ + public Builder connectionName(String connectionName) { + return connectionName(Output.of(connectionName)); + } + /** * @param forceDestroy Delete catalog regardless of its contents. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ConnectionState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ConnectionState.java new file mode 100644 index 00000000..0e0bc78c --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ConnectionState.java @@ -0,0 +1,305 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Boolean; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ConnectionState extends com.pulumi.resources.ResourceArgs { + + public static final ConnectionState Empty = new ConnectionState(); + + /** + * Free-form text. + * + */ + @Import(name="comment") + private @Nullable Output comment; + + /** + * @return Free-form text. + * + */ + public Optional> comment() { + return Optional.ofNullable(this.comment); + } + + /** + * Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + * + */ + @Import(name="connectionType") + private @Nullable Output connectionType; + + /** + * @return Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + * + */ + public Optional> connectionType() { + return Optional.ofNullable(this.connectionType); + } + + @Import(name="metastoreId") + private @Nullable Output metastoreId; + + public Optional> metastoreId() { + return Optional.ofNullable(this.metastoreId); + } + + /** + * Name of the Connection. + * + */ + @Import(name="name") + private @Nullable Output name; + + /** + * @return Name of the Connection. + * + */ + public Optional> name() { + return Optional.ofNullable(this.name); + } + + /** + * The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. 
+ * + */ + @Import(name="options") + private @Nullable Output> options; + + /** + * @return The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + * + */ + public Optional>> options() { + return Optional.ofNullable(this.options); + } + + /** + * Name of the connection owner. + * + */ + @Import(name="owner") + private @Nullable Output owner; + + /** + * @return Name of the connection owner. + * + */ + public Optional> owner() { + return Optional.ofNullable(this.owner); + } + + /** + * Free-form connection properties. + * + */ + @Import(name="properties") + private @Nullable Output> properties; + + /** + * @return Free-form connection properties. + * + */ + public Optional>> properties() { + return Optional.ofNullable(this.properties); + } + + @Import(name="readOnly") + private @Nullable Output readOnly; + + public Optional> readOnly() { + return Optional.ofNullable(this.readOnly); + } + + private ConnectionState() {} + + private ConnectionState(ConnectionState $) { + this.comment = $.comment; + this.connectionType = $.connectionType; + this.metastoreId = $.metastoreId; + this.name = $.name; + this.options = $.options; + this.owner = $.owner; + this.properties = $.properties; + this.readOnly = $.readOnly; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ConnectionState defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ConnectionState $; + + public Builder() { + $ = new ConnectionState(); + } + + public Builder(ConnectionState defaults) { + $ = new ConnectionState(Objects.requireNonNull(defaults)); + } + + /** + * @param comment Free-form text. + * + * @return builder + * + */ + public Builder comment(@Nullable Output comment) { + $.comment = comment; + return this; + } + + /** + * @param comment Free-form text. + * + * @return builder + * + */ + public Builder comment(String comment) { + return comment(Output.of(comment)); + } + + /** + * @param connectionType Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + * + * @return builder + * + */ + public Builder connectionType(@Nullable Output connectionType) { + $.connectionType = connectionType; + return this; + } + + /** + * @param connectionType Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + * + * @return builder + * + */ + public Builder connectionType(String connectionType) { + return connectionType(Output.of(connectionType)); + } + + public Builder metastoreId(@Nullable Output metastoreId) { + $.metastoreId = metastoreId; + return this; + } + + public Builder metastoreId(String metastoreId) { + return metastoreId(Output.of(metastoreId)); + } + + /** + * @param name Name of the Connection. + * + * @return builder + * + */ + public Builder name(@Nullable Output name) { + $.name = name; + return this; + } + + /** + * @param name Name of the Connection. + * + * @return builder + * + */ + public Builder name(String name) { + return name(Output.of(name)); + } + + /** + * @param options The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. 
+ * + * @return builder + * + */ + public Builder options(@Nullable Output> options) { + $.options = options; + return this; + } + + /** + * @param options The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + * + * @return builder + * + */ + public Builder options(Map options) { + return options(Output.of(options)); + } + + /** + * @param owner Name of the connection owner. + * + * @return builder + * + */ + public Builder owner(@Nullable Output owner) { + $.owner = owner; + return this; + } + + /** + * @param owner Name of the connection owner. + * + * @return builder + * + */ + public Builder owner(String owner) { + return owner(Output.of(owner)); + } + + /** + * @param properties Free-form connection properties. + * + * @return builder + * + */ + public Builder properties(@Nullable Output> properties) { + $.properties = properties; + return this; + } + + /** + * @param properties Free-form connection properties. + * + * @return builder + * + */ + public Builder properties(Map properties) { + return properties(Output.of(properties)); + } + + public Builder readOnly(@Nullable Output readOnly) { + $.readOnly = readOnly; + return this; + } + + public Builder readOnly(Boolean readOnly) { + return readOnly(Output.of(readOnly)); + } + + public ConnectionState build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationEncryptionDetailsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationEncryptionDetailsArgs.java new file mode 100644 index 00000000..7c28bb61 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationEncryptionDetailsArgs.java @@ -0,0 +1,63 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
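`ConnectionState` above backs the new `databricks.Connection` resource in this release. A hedged sketch of creating a MySQL connection follows; the host and credentials are placeholders, the `ConnectionArgs` setters are assumed to mirror the state fields shown here, and the options map is assumed to take `Map<String, Object>` as the generated imports suggest.

```java
import com.pulumi.Pulumi;
import com.pulumi.databricks.Connection;
import com.pulumi.databricks.ConnectionArgs;
import java.util.Map;

public class ConnectionSketch {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            // Unity Catalog connection to an external MySQL database (placeholder options).
            var mysql = new Connection("mysql", ConnectionArgs.builder()
                .name("mysql_connection")
                .connectionType("MYSQL")
                .comment("Managed by Pulumi")
                .options(Map.<String, Object>of(
                    "host", "test.mysql.example.com",
                    "port", "3306",
                    "user", "admin",
                    "password", "placeholder"))
                .build());
        });
    }
}
```

The new `connectionName` field on `databricks.Catalog` (see the `CatalogState` changes earlier in this patch) would then reference `mysql.name()` to expose the database as a foreign catalog.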
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.databricks.inputs.ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ExternalLocationEncryptionDetailsArgs extends com.pulumi.resources.ResourceArgs { + + public static final ExternalLocationEncryptionDetailsArgs Empty = new ExternalLocationEncryptionDetailsArgs(); + + @Import(name="sseEncryptionDetails") + private @Nullable Output sseEncryptionDetails; + + public Optional> sseEncryptionDetails() { + return Optional.ofNullable(this.sseEncryptionDetails); + } + + private ExternalLocationEncryptionDetailsArgs() {} + + private ExternalLocationEncryptionDetailsArgs(ExternalLocationEncryptionDetailsArgs $) { + this.sseEncryptionDetails = $.sseEncryptionDetails; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ExternalLocationEncryptionDetailsArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ExternalLocationEncryptionDetailsArgs $; + + public Builder() { + $ = new ExternalLocationEncryptionDetailsArgs(); + } + + public Builder(ExternalLocationEncryptionDetailsArgs defaults) { + $ = new ExternalLocationEncryptionDetailsArgs(Objects.requireNonNull(defaults)); + } + + public Builder sseEncryptionDetails(@Nullable Output sseEncryptionDetails) { + $.sseEncryptionDetails = sseEncryptionDetails; + return this; + } + + public Builder sseEncryptionDetails(ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs sseEncryptionDetails) { + return sseEncryptionDetails(Output.of(sseEncryptionDetails)); + } + + public ExternalLocationEncryptionDetailsArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs.java new file mode 100644 index 00000000..bcc1bb8d --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs.java @@ -0,0 +1,80 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs extends com.pulumi.resources.ResourceArgs { + + public static final ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs Empty = new ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs(); + + @Import(name="algorithm") + private @Nullable Output algorithm; + + public Optional> algorithm() { + return Optional.ofNullable(this.algorithm); + } + + @Import(name="awsKmsKeyArn") + private @Nullable Output awsKmsKeyArn; + + public Optional> awsKmsKeyArn() { + return Optional.ofNullable(this.awsKmsKeyArn); + } + + private ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs() {} + + private ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs(ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs $) { + this.algorithm = $.algorithm; + this.awsKmsKeyArn = $.awsKmsKeyArn; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs $; + + public Builder() { + $ = new ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs(); + } + + public Builder(ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs defaults) { + $ = new ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs(Objects.requireNonNull(defaults)); + } + + public Builder algorithm(@Nullable Output algorithm) { + $.algorithm = algorithm; + return this; + } + + public Builder algorithm(String algorithm) { + return algorithm(Output.of(algorithm)); + } + + public Builder awsKmsKeyArn(@Nullable Output awsKmsKeyArn) { + $.awsKmsKeyArn = awsKmsKeyArn; + return this; + } + + public Builder awsKmsKeyArn(String awsKmsKeyArn) { + return awsKmsKeyArn(Output.of(awsKmsKeyArn)); + } + + public ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationState.java index 56bb619b..a9afab6b 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ExternalLocationState.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import com.pulumi.databricks.inputs.ExternalLocationEncryptionDetailsArgs; import java.lang.Boolean; import java.lang.String; import java.util.Objects; @@ -16,6 +17,21 @@ public final class ExternalLocationState extends com.pulumi.resources.ResourceAr public static final ExternalLocationState Empty = new ExternalLocationState(); + /** + * The ARN of the s3 access point to use with the external location (AWS). + * + */ + @Import(name="accessPoint") + private @Nullable Output accessPoint; + + /** + * @return The ARN of the s3 access point to use with the external location (AWS). + * + */ + public Optional> accessPoint() { + return Optional.ofNullable(this.accessPoint); + } + /** * User-supplied free-form text. 
* @@ -32,20 +48,35 @@ public Optional> comment() { } /** - * Name of the databricks.StorageCredential to use with this External Location. + * Name of the databricks.StorageCredential to use with this external location. * */ @Import(name="credentialName") private @Nullable Output credentialName; /** - * @return Name of the databricks.StorageCredential to use with this External Location. + * @return Name of the databricks.StorageCredential to use with this external location. * */ public Optional> credentialName() { return Optional.ofNullable(this.credentialName); } + /** + * The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + * + */ + @Import(name="encryptionDetails") + private @Nullable Output encryptionDetails; + + /** + * @return The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + * + */ + public Optional> encryptionDetails() { + return Optional.ofNullable(this.encryptionDetails); + } + /** * Destroy external location regardless of its dependents. * @@ -61,6 +92,21 @@ public Optional> forceDestroy() { return Optional.ofNullable(this.forceDestroy); } + /** + * Update external location regardless of its dependents. + * + */ + @Import(name="forceUpdate") + private @Nullable Output forceUpdate; + + /** + * @return Update external location regardless of its dependents. + * + */ + public Optional> forceUpdate() { + return Optional.ofNullable(this.forceUpdate); + } + @Import(name="metastoreId") private @Nullable Output metastoreId; @@ -84,14 +130,14 @@ public Optional> name() { } /** - * Username/groupname/sp application_id of the external Location owner. + * Username/groupname/sp application_id of the external location owner. * */ @Import(name="owner") private @Nullable Output owner; /** - * @return Username/groupname/sp application_id of the external Location owner. + * @return Username/groupname/sp application_id of the external location owner. * */ public Optional> owner() { @@ -146,9 +192,12 @@ public Optional> url() { private ExternalLocationState() {} private ExternalLocationState(ExternalLocationState $) { + this.accessPoint = $.accessPoint; this.comment = $.comment; this.credentialName = $.credentialName; + this.encryptionDetails = $.encryptionDetails; this.forceDestroy = $.forceDestroy; + this.forceUpdate = $.forceUpdate; this.metastoreId = $.metastoreId; this.name = $.name; this.owner = $.owner; @@ -175,6 +224,27 @@ public Builder(ExternalLocationState defaults) { $ = new ExternalLocationState(Objects.requireNonNull(defaults)); } + /** + * @param accessPoint The ARN of the s3 access point to use with the external location (AWS). + * + * @return builder + * + */ + public Builder accessPoint(@Nullable Output accessPoint) { + $.accessPoint = accessPoint; + return this; + } + + /** + * @param accessPoint The ARN of the s3 access point to use with the external location (AWS). + * + * @return builder + * + */ + public Builder accessPoint(String accessPoint) { + return accessPoint(Output.of(accessPoint)); + } + /** * @param comment User-supplied free-form text. * @@ -197,7 +267,7 @@ public Builder comment(String comment) { } /** - * @param credentialName Name of the databricks.StorageCredential to use with this External Location. + * @param credentialName Name of the databricks.StorageCredential to use with this external location. 
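The AWS-specific fields surfaced above (`accessPoint`, `encryptionDetails`, `forceUpdate`) come together in the sketch below. It assumes `ExternalLocationArgs` gained the same setters as this state class; the bucket URL, access point ARN, KMS key ARN, and SSE algorithm value are placeholders, and the storage credential is assumed to exist already.

```java
import com.pulumi.Pulumi;
import com.pulumi.databricks.ExternalLocation;
import com.pulumi.databricks.ExternalLocationArgs;
import com.pulumi.databricks.inputs.ExternalLocationEncryptionDetailsArgs;
import com.pulumi.databricks.inputs.ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs;

public class ExternalLocationSketch {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var location = new ExternalLocation("sales", ExternalLocationArgs.builder()
                .name("sales-data")
                .url("s3://acme-sales-data/raw")                                    // placeholder bucket
                .credentialName("external-access")                                  // existing storage credential
                .accessPoint("arn:aws:s3:us-east-1:123456789012:accesspoint/sales") // placeholder access point ARN
                .encryptionDetails(ExternalLocationEncryptionDetailsArgs.builder()
                    .sseEncryptionDetails(ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs.builder()
                        .algorithm("AWS_SSE_KMS")                                   // assumed SSE-KMS algorithm value
                        .awsKmsKeyArn("arn:aws:kms:us-east-1:123456789012:key/placeholder")
                        .build())
                    .build())
                .build());
        });
    }
}
```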
* * @return builder * @@ -208,7 +278,7 @@ public Builder credentialName(@Nullable Output credentialName) { } /** - * @param credentialName Name of the databricks.StorageCredential to use with this External Location. + * @param credentialName Name of the databricks.StorageCredential to use with this external location. * * @return builder * @@ -217,6 +287,27 @@ public Builder credentialName(String credentialName) { return credentialName(Output.of(credentialName)); } + /** + * @param encryptionDetails The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + * + * @return builder + * + */ + public Builder encryptionDetails(@Nullable Output encryptionDetails) { + $.encryptionDetails = encryptionDetails; + return this; + } + + /** + * @param encryptionDetails The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + * + * @return builder + * + */ + public Builder encryptionDetails(ExternalLocationEncryptionDetailsArgs encryptionDetails) { + return encryptionDetails(Output.of(encryptionDetails)); + } + /** * @param forceDestroy Destroy external location regardless of its dependents. * @@ -238,6 +329,27 @@ public Builder forceDestroy(Boolean forceDestroy) { return forceDestroy(Output.of(forceDestroy)); } + /** + * @param forceUpdate Update external location regardless of its dependents. + * + * @return builder + * + */ + public Builder forceUpdate(@Nullable Output forceUpdate) { + $.forceUpdate = forceUpdate; + return this; + } + + /** + * @param forceUpdate Update external location regardless of its dependents. + * + * @return builder + * + */ + public Builder forceUpdate(Boolean forceUpdate) { + return forceUpdate(Output.of(forceUpdate)); + } + public Builder metastoreId(@Nullable Output metastoreId) { $.metastoreId = metastoreId; return this; @@ -269,7 +381,7 @@ public Builder name(String name) { } /** - * @param owner Username/groupname/sp application_id of the external Location owner. + * @param owner Username/groupname/sp application_id of the external location owner. * * @return builder * @@ -280,7 +392,7 @@ public Builder owner(@Nullable Output owner) { } /** - * @param owner Username/groupname/sp application_id of the external Location owner. + * @param owner Username/groupname/sp application_id of the external location owner. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetGroupArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetGroupArgs.java index 733a62ea..92bd50f2 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetGroupArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetGroupArgs.java @@ -17,6 +17,21 @@ public final class GetGroupArgs extends com.pulumi.resources.InvokeArgs { public static final GetGroupArgs Empty = new GetGroupArgs(); + /** + * identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + * + */ + @Import(name="aclPrincipalId") + private @Nullable Output aclPrincipalId; + + /** + * @return identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. 
+ * + */ + public Optional> aclPrincipalId() { + return Optional.ofNullable(this.aclPrincipalId); + } + /** * True if group members can create clusters * @@ -203,6 +218,7 @@ public Optional> workspaceAccess() { private GetGroupArgs() {} private GetGroupArgs(GetGroupArgs $) { + this.aclPrincipalId = $.aclPrincipalId; this.allowClusterCreate = $.allowClusterCreate; this.allowInstancePoolCreate = $.allowInstancePoolCreate; this.childGroups = $.childGroups; @@ -236,6 +252,27 @@ public Builder(GetGroupArgs defaults) { $ = new GetGroupArgs(Objects.requireNonNull(defaults)); } + /** + * @param aclPrincipalId identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + * + * @return builder + * + */ + public Builder aclPrincipalId(@Nullable Output aclPrincipalId) { + $.aclPrincipalId = aclPrincipalId; + return this; + } + + /** + * @param aclPrincipalId identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + * + * @return builder + * + */ + public Builder aclPrincipalId(String aclPrincipalId) { + return aclPrincipalId(Output.of(aclPrincipalId)); + } + /** * @param allowClusterCreate True if group members can create clusters * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetGroupPlainArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetGroupPlainArgs.java index 0c6c1c4f..e40526fa 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetGroupPlainArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetGroupPlainArgs.java @@ -16,6 +16,21 @@ public final class GetGroupPlainArgs extends com.pulumi.resources.InvokeArgs { public static final GetGroupPlainArgs Empty = new GetGroupPlainArgs(); + /** + * identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + * + */ + @Import(name="aclPrincipalId") + private @Nullable String aclPrincipalId; + + /** + * @return identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + * + */ + public Optional aclPrincipalId() { + return Optional.ofNullable(this.aclPrincipalId); + } + /** * True if group members can create clusters * @@ -202,6 +217,7 @@ public Optional workspaceAccess() { private GetGroupPlainArgs() {} private GetGroupPlainArgs(GetGroupPlainArgs $) { + this.aclPrincipalId = $.aclPrincipalId; this.allowClusterCreate = $.allowClusterCreate; this.allowInstancePoolCreate = $.allowInstancePoolCreate; this.childGroups = $.childGroups; @@ -235,6 +251,17 @@ public Builder(GetGroupPlainArgs defaults) { $ = new GetGroupPlainArgs(Objects.requireNonNull(defaults)); } + /** + * @param aclPrincipalId identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. 
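The `aclPrincipalId` attribute added to the group data source above is intended to feed `databricks.AccessControlRuleSet` grant rules. A hedged sketch; the group display name is a placeholder, and the `aclPrincipalId()` accessor on the lookup result is assumed to match the field introduced here (the service principal data source gains the equivalent attribute).

```java
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.inputs.GetGroupArgs;

public class GroupPrincipalSketch {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            // Look up an account group and expose its ACL principal id, e.g. "groups/Data Engineers".
            Output<String> principal = DatabricksFunctions.getGroup(GetGroupArgs.builder()
                    .displayName("Data Engineers")
                    .build())
                .applyValue(group -> group.aclPrincipalId());

            // The value can then be used when building the principals list of an
            // AccessControlRuleSetGrantRuleArgs.
            ctx.export("groupPrincipal", principal);
        });
    }
}
```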
+ * + * @return builder + * + */ + public Builder aclPrincipalId(@Nullable String aclPrincipalId) { + $.aclPrincipalId = aclPrincipalId; + return this; + } + /** * @param allowClusterCreate True if group members can create clusters * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTask.java index 3b976b9c..ec4be844 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTask.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTask.java @@ -4,6 +4,7 @@ package com.pulumi.databricks.inputs; import com.pulumi.core.annotations.Import; +import java.lang.Integer; import java.lang.Object; import java.lang.String; import java.util.Map; @@ -17,9 +18,9 @@ public final class GetJobJobSettingsSettingsRunJobTask extends com.pulumi.resour public static final GetJobJobSettingsSettingsRunJobTask Empty = new GetJobJobSettingsSettingsRunJobTask(); @Import(name="jobId", required=true) - private String jobId; + private Integer jobId; - public String jobId() { + public Integer jobId() { return this.jobId; } @@ -55,7 +56,7 @@ public Builder(GetJobJobSettingsSettingsRunJobTask defaults) { $ = new GetJobJobSettingsSettingsRunJobTask(Objects.requireNonNull(defaults)); } - public Builder jobId(String jobId) { + public Builder jobId(Integer jobId) { $.jobId = jobId; return this; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTaskArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTaskArgs.java index 514e90bb..da7e8b85 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTaskArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTaskArgs.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import java.lang.Integer; import java.lang.Object; import java.lang.String; import java.util.Map; @@ -18,9 +19,9 @@ public final class GetJobJobSettingsSettingsRunJobTaskArgs extends com.pulumi.re public static final GetJobJobSettingsSettingsRunJobTaskArgs Empty = new GetJobJobSettingsSettingsRunJobTaskArgs(); @Import(name="jobId", required=true) - private Output jobId; + private Output jobId; - public Output jobId() { + public Output jobId() { return this.jobId; } @@ -56,12 +57,12 @@ public Builder(GetJobJobSettingsSettingsRunJobTaskArgs defaults) { $ = new GetJobJobSettingsSettingsRunJobTaskArgs(Objects.requireNonNull(defaults)); } - public Builder jobId(Output jobId) { + public Builder jobId(Output jobId) { $.jobId = jobId; return this; } - public Builder jobId(String jobId) { + public Builder jobId(Integer jobId) { return jobId(Output.of(jobId)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTask.java index 12ce6d88..e21eda1d 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTask.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTask.java @@ -4,6 +4,7 @@ package com.pulumi.databricks.inputs; import com.pulumi.core.annotations.Import; +import java.lang.Integer; import java.lang.Object; import java.lang.String; import java.util.Map; @@ -17,9 
+18,9 @@ public final class GetJobJobSettingsSettingsTaskRunJobTask extends com.pulumi.re public static final GetJobJobSettingsSettingsTaskRunJobTask Empty = new GetJobJobSettingsSettingsTaskRunJobTask(); @Import(name="jobId", required=true) - private String jobId; + private Integer jobId; - public String jobId() { + public Integer jobId() { return this.jobId; } @@ -55,7 +56,7 @@ public Builder(GetJobJobSettingsSettingsTaskRunJobTask defaults) { $ = new GetJobJobSettingsSettingsTaskRunJobTask(Objects.requireNonNull(defaults)); } - public Builder jobId(String jobId) { + public Builder jobId(Integer jobId) { $.jobId = jobId; return this; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.java index 8034990c..c46d76bc 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import java.lang.Integer; import java.lang.Object; import java.lang.String; import java.util.Map; @@ -18,9 +19,9 @@ public final class GetJobJobSettingsSettingsTaskRunJobTaskArgs extends com.pulum public static final GetJobJobSettingsSettingsTaskRunJobTaskArgs Empty = new GetJobJobSettingsSettingsTaskRunJobTaskArgs(); @Import(name="jobId", required=true) - private Output jobId; + private Output jobId; - public Output jobId() { + public Output jobId() { return this.jobId; } @@ -56,12 +57,12 @@ public Builder(GetJobJobSettingsSettingsTaskRunJobTaskArgs defaults) { $ = new GetJobJobSettingsSettingsTaskRunJobTaskArgs(Objects.requireNonNull(defaults)); } - public Builder jobId(Output jobId) { + public Builder jobId(Output jobId) { $.jobId = jobId; return this; } - public Builder jobId(String jobId) { + public Builder jobId(Integer jobId) { return jobId(Output.of(jobId)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalArgs.java index c35dd92e..c4e6acac 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalArgs.java @@ -16,6 +16,21 @@ public final class GetServicePrincipalArgs extends com.pulumi.resources.InvokeAr public static final GetServicePrincipalArgs Empty = new GetServicePrincipalArgs(); + /** + * identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + * + */ + @Import(name="aclPrincipalId") + private @Nullable Output aclPrincipalId; + + /** + * @return identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + * + */ + public Optional> aclPrincipalId() { + return Optional.ofNullable(this.aclPrincipalId); + } + /** * Whether service principal is active or not. 
* @@ -131,6 +146,7 @@ public Optional> spId() { private GetServicePrincipalArgs() {} private GetServicePrincipalArgs(GetServicePrincipalArgs $) { + this.aclPrincipalId = $.aclPrincipalId; this.active = $.active; this.applicationId = $.applicationId; this.displayName = $.displayName; @@ -159,6 +175,27 @@ public Builder(GetServicePrincipalArgs defaults) { $ = new GetServicePrincipalArgs(Objects.requireNonNull(defaults)); } + /** + * @param aclPrincipalId identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + * + * @return builder + * + */ + public Builder aclPrincipalId(@Nullable Output aclPrincipalId) { + $.aclPrincipalId = aclPrincipalId; + return this; + } + + /** + * @param aclPrincipalId identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + * + * @return builder + * + */ + public Builder aclPrincipalId(String aclPrincipalId) { + return aclPrincipalId(Output.of(aclPrincipalId)); + } + /** * @param active Whether service principal is active or not. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalPlainArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalPlainArgs.java index 3a0704a5..3e1010e3 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalPlainArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetServicePrincipalPlainArgs.java @@ -15,6 +15,21 @@ public final class GetServicePrincipalPlainArgs extends com.pulumi.resources.Inv public static final GetServicePrincipalPlainArgs Empty = new GetServicePrincipalPlainArgs(); + /** + * identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + * + */ + @Import(name="aclPrincipalId") + private @Nullable String aclPrincipalId; + + /** + * @return identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + * + */ + public Optional aclPrincipalId() { + return Optional.ofNullable(this.aclPrincipalId); + } + /** * Whether service principal is active or not. * @@ -130,6 +145,7 @@ public Optional spId() { private GetServicePrincipalPlainArgs() {} private GetServicePrincipalPlainArgs(GetServicePrincipalPlainArgs $) { + this.aclPrincipalId = $.aclPrincipalId; this.active = $.active; this.applicationId = $.applicationId; this.displayName = $.displayName; @@ -158,6 +174,17 @@ public Builder(GetServicePrincipalPlainArgs defaults) { $ = new GetServicePrincipalPlainArgs(Objects.requireNonNull(defaults)); } + /** + * @param aclPrincipalId identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + * + * @return builder + * + */ + public Builder aclPrincipalId(@Nullable String aclPrincipalId) { + $.aclPrincipalId = aclPrincipalId; + return this; + } + /** * @param active Whether service principal is active or not. 
* diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GrantsState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GrantsState.java index c5d1b4e2..7086f9e4 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GrantsState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GrantsState.java @@ -31,6 +31,13 @@ public Optional> externalLocation() { return Optional.ofNullable(this.externalLocation); } + @Import(name="foreignConnection") + private @Nullable Output foreignConnection; + + public Optional> foreignConnection() { + return Optional.ofNullable(this.foreignConnection); + } + @Import(name="function") private @Nullable Output function; @@ -106,6 +113,7 @@ private GrantsState() {} private GrantsState(GrantsState $) { this.catalog = $.catalog; this.externalLocation = $.externalLocation; + this.foreignConnection = $.foreignConnection; this.function = $.function; this.grants = $.grants; this.materializedView = $.materializedView; @@ -154,6 +162,15 @@ public Builder externalLocation(String externalLocation) { return externalLocation(Output.of(externalLocation)); } + public Builder foreignConnection(@Nullable Output foreignConnection) { + $.foreignConnection = foreignConnection; + return this; + } + + public Builder foreignConnection(String foreignConnection) { + return foreignConnection(Output.of(foreignConnection)); + } + public Builder function(@Nullable Output function) { $.function = function; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobRunJobTaskArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobRunJobTaskArgs.java index 0d1241dd..e13521a5 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobRunJobTaskArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobRunJobTaskArgs.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import java.lang.Integer; import java.lang.Object; import java.lang.String; import java.util.Map; @@ -22,13 +23,13 @@ public final class JobRunJobTaskArgs extends com.pulumi.resources.ResourceArgs { * */ @Import(name="jobId", required=true) - private Output jobId; + private Output jobId; /** * @return (String) ID of the job * */ - public Output jobId() { + public Output jobId() { return this.jobId; } @@ -78,7 +79,7 @@ public Builder(JobRunJobTaskArgs defaults) { * @return builder * */ - public Builder jobId(Output jobId) { + public Builder jobId(Output jobId) { $.jobId = jobId; return this; } @@ -89,7 +90,7 @@ public Builder jobId(Output jobId) { * @return builder * */ - public Builder jobId(String jobId) { + public Builder jobId(Integer jobId) { return jobId(Output.of(jobId)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskRunJobTaskArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskRunJobTaskArgs.java index 80ebb28a..ff324769 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskRunJobTaskArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskRunJobTaskArgs.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import java.lang.Integer; import java.lang.Object; import java.lang.String; import java.util.Map; @@ -22,13 +23,13 @@ public final class JobTaskRunJobTaskArgs extends com.pulumi.resources.ResourceAr * */ @Import(name="jobId", required=true) - private Output jobId; + private Output jobId; /** * @return (String) ID of the job * */ - public Output jobId() { + public 
Output jobId() { return this.jobId; } @@ -78,7 +79,7 @@ public Builder(JobTaskRunJobTaskArgs defaults) { * @return builder * */ - public Builder jobId(Output jobId) { + public Builder jobId(Output jobId) { $.jobId = jobId; return this; } @@ -89,7 +90,7 @@ public Builder jobId(Output jobId) { * @return builder * */ - public Builder jobId(String jobId) { + public Builder jobId(Integer jobId) { return jobId(Output.of(jobId)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessAwsIamRoleArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessAwsIamRoleArgs.java index 40a8ead2..2c42aa45 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessAwsIamRoleArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessAwsIamRoleArgs.java @@ -16,7 +16,7 @@ public final class MetastoreDataAccessAwsIamRoleArgs extends com.pulumi.resource /** * The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` * - * `azure_service_principal` optional configuration block for credential details for Azure: + * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): * */ @Import(name="roleArn", required=true) @@ -25,7 +25,7 @@ public final class MetastoreDataAccessAwsIamRoleArgs extends com.pulumi.resource /** * @return The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` * - * `azure_service_principal` optional configuration block for credential details for Azure: + * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): * */ public Output roleArn() { @@ -59,7 +59,7 @@ public Builder(MetastoreDataAccessAwsIamRoleArgs defaults) { /** * @param roleArn The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` * - * `azure_service_principal` optional configuration block for credential details for Azure: + * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): * * @return builder * @@ -72,7 +72,7 @@ public Builder roleArn(Output roleArn) { /** * @param roleArn The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` * - * `azure_service_principal` optional configuration block for credential details for Azure: + * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessAzureServicePrincipalArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessAzureServicePrincipalArgs.java index bbf501b1..f8c584a6 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessAzureServicePrincipalArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessAzureServicePrincipalArgs.java @@ -31,8 +31,6 @@ public Output applicationId() { /** * The client secret generated for the above app ID in AAD. 
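One behavioural note on the hunks above: `jobId` on the run-job task inputs changed from `String` to `Integer`, so programs that previously passed the job ID as a string now pass the numeric ID. A minimal sketch of the builder after the change:

```java
import com.pulumi.databricks.inputs.JobTaskRunJobTaskArgs;

public class RunJobTaskSketch {
    // jobId is now an Integer; this builder previously accepted a String.
    static JobTaskRunJobTaskArgs runJob(int jobId) {
        return JobTaskRunJobTaskArgs.builder()
            .jobId(jobId) // e.g. 123 instead of "123"
            .build();
    }
}
```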
**This field is redacted on output** * - * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: - * */ @Import(name="clientSecret", required=true) private Output clientSecret; @@ -40,8 +38,6 @@ public Output applicationId() { /** * @return The client secret generated for the above app ID in AAD. **This field is redacted on output** * - * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: - * */ public Output clientSecret() { return this.clientSecret; @@ -112,8 +108,6 @@ public Builder applicationId(String applicationId) { /** * @param clientSecret The client secret generated for the above app ID in AAD. **This field is redacted on output** * - * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: - * * @return builder * */ @@ -125,8 +119,6 @@ public Builder clientSecret(Output clientSecret) { /** * @param clientSecret The client secret generated for the above app ID in AAD. **This field is redacted on output** * - * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: - * * @return builder * */ diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessDatabricksGcpServiceAccountArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessDatabricksGcpServiceAccountArgs.java index 29edeb7a..4efa2fbc 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessDatabricksGcpServiceAccountArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessDatabricksGcpServiceAccountArgs.java @@ -18,6 +18,8 @@ public final class MetastoreDataAccessDatabricksGcpServiceAccountArgs extends co /** * The email of the GCP service account created, to be granted access to relevant buckets. * + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * */ @Import(name="email") private @Nullable Output email; @@ -25,6 +27,8 @@ public final class MetastoreDataAccessDatabricksGcpServiceAccountArgs extends co /** * @return The email of the GCP service account created, to be granted access to relevant buckets. * + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * */ public Optional> email() { return Optional.ofNullable(this.email); @@ -57,6 +61,8 @@ public Builder(MetastoreDataAccessDatabricksGcpServiceAccountArgs defaults) { /** * @param email The email of the GCP service account created, to be granted access to relevant buckets. * + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * * @return builder * */ @@ -68,6 +74,8 @@ public Builder email(@Nullable Output email) { /** * @param email The email of the GCP service account created, to be granted access to relevant buckets. 
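For the metastore data-access credential blocks above, a small sketch of the AWS IAM role input, assuming the usual String overload of roleArn; the ARN follows the form given in the field documentation:

    import com.pulumi.databricks.inputs.MetastoreDataAccessAwsIamRoleArgs;

    class AwsIamRoleSketch {
        static MetastoreDataAccessAwsIamRoleArgs example() {
            return MetastoreDataAccessAwsIamRoleArgs.builder()
                // ARN shape taken from the field documentation; account and role name are placeholders
                .roleArn("arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF")
                .build();
        }
    }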
* + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * * @return builder * */ diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessGcpServiceAccountKeyArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessGcpServiceAccountKeyArgs.java index a3f77c5c..36878570 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessGcpServiceAccountKeyArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreDataAccessGcpServiceAccountKeyArgs.java @@ -16,6 +16,8 @@ public final class MetastoreDataAccessGcpServiceAccountKeyArgs extends com.pulum /** * The email of the GCP service account created, to be granted access to relevant buckets. * + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * */ @Import(name="email", required=true) private Output email; @@ -23,6 +25,8 @@ public final class MetastoreDataAccessGcpServiceAccountKeyArgs extends com.pulum /** * @return The email of the GCP service account created, to be granted access to relevant buckets. * + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * */ public Output email() { return this.email; @@ -71,6 +75,8 @@ public Builder(MetastoreDataAccessGcpServiceAccountKeyArgs defaults) { /** * @param email The email of the GCP service account created, to be granted access to relevant buckets. * + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * * @return builder * */ @@ -82,6 +88,8 @@ public Builder email(Output email) { /** * @param email The email of the GCP service account created, to be granted access to relevant buckets. * + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * * @return builder * */ diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreState.java index 55aea661..c322f1b8 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MetastoreState.java @@ -112,6 +112,13 @@ public Optional> globalMetastoreId() { return Optional.ofNullable(this.globalMetastoreId); } + @Import(name="metastoreId") + private @Nullable Output metastoreId; + + public Optional> metastoreId() { + return Optional.ofNullable(this.metastoreId); + } + /** * Name of metastore. 
* @@ -142,9 +149,17 @@ public Optional> owner() { return Optional.ofNullable(this.owner); } + /** + * The region of the metastore + * + */ @Import(name="region") private @Nullable Output region; + /** + * @return The region of the metastore + * + */ public Optional> region() { return Optional.ofNullable(this.region); } @@ -164,6 +179,13 @@ public Optional> storageRoot() { return Optional.ofNullable(this.storageRoot); } + @Import(name="storageRootCredentialId") + private @Nullable Output storageRootCredentialId; + + public Optional> storageRootCredentialId() { + return Optional.ofNullable(this.storageRootCredentialId); + } + @Import(name="updatedAt") private @Nullable Output updatedAt; @@ -190,10 +212,12 @@ private MetastoreState(MetastoreState $) { this.deltaSharingScope = $.deltaSharingScope; this.forceDestroy = $.forceDestroy; this.globalMetastoreId = $.globalMetastoreId; + this.metastoreId = $.metastoreId; this.name = $.name; this.owner = $.owner; this.region = $.region; this.storageRoot = $.storageRoot; + this.storageRootCredentialId = $.storageRootCredentialId; this.updatedAt = $.updatedAt; this.updatedBy = $.updatedBy; } @@ -345,6 +369,15 @@ public Builder globalMetastoreId(String globalMetastoreId) { return globalMetastoreId(Output.of(globalMetastoreId)); } + public Builder metastoreId(@Nullable Output metastoreId) { + $.metastoreId = metastoreId; + return this; + } + + public Builder metastoreId(String metastoreId) { + return metastoreId(Output.of(metastoreId)); + } + /** * @param name Name of metastore. * @@ -387,11 +420,23 @@ public Builder owner(String owner) { return owner(Output.of(owner)); } + /** + * @param region The region of the metastore + * + * @return builder + * + */ public Builder region(@Nullable Output region) { $.region = region; return this; } + /** + * @param region The region of the metastore + * + * @return builder + * + */ public Builder region(String region) { return region(Output.of(region)); } @@ -417,6 +462,15 @@ public Builder storageRoot(String storageRoot) { return storageRoot(Output.of(storageRoot)); } + public Builder storageRootCredentialId(@Nullable Output storageRootCredentialId) { + $.storageRootCredentialId = storageRootCredentialId; + return this; + } + + public Builder storageRootCredentialId(String storageRootCredentialId) { + return storageRootCredentialId(Output.of(storageRootCredentialId)); + } + public Builder updatedAt(@Nullable Output updatedAt) { $.updatedAt = updatedAt; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MlflowModelState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MlflowModelState.java index 9c34cb8a..2880183b 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MlflowModelState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MlflowModelState.java @@ -62,13 +62,6 @@ public Optional> name() { return Optional.ofNullable(this.name); } - @Import(name="registeredModelId") - private @Nullable Output registeredModelId; - - public Optional> registeredModelId() { - return Optional.ofNullable(this.registeredModelId); - } - /** * Tags for the MLflow model. 
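MetastoreState gains metastoreId and storageRootCredentialId, and region is now documented, as shown above. A sketch of the new builder calls with placeholder identifiers:

    import com.pulumi.databricks.inputs.MetastoreState;

    class MetastoreStateSketch {
        static MetastoreState example() {
            return MetastoreState.builder()
                .metastoreId("12345678-1234-1234-1234-123456789012")          // placeholder metastore ID
                .region("us-east-1")                                          // placeholder region
                .storageRootCredentialId("abcd1234-aaaa-bbbb-cccc-ddddeeeeffff") // placeholder credential ID
                .build();
        }
    }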
* @@ -98,7 +91,6 @@ private MlflowModelState(MlflowModelState $) { this.description = $.description; this.lastUpdatedTimestamp = $.lastUpdatedTimestamp; this.name = $.name; - this.registeredModelId = $.registeredModelId; this.tags = $.tags; this.userId = $.userId; } @@ -181,15 +173,6 @@ public Builder name(String name) { return name(Output.of(name)); } - public Builder registeredModelId(@Nullable Output registeredModelId) { - $.registeredModelId = registeredModelId; - return this; - } - - public Builder registeredModelId(String registeredModelId) { - return registeredModelId(Output.of(registeredModelId)); - } - /** * @param tags Tags for the MLflow model. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MlflowModelTagArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MlflowModelTagArgs.java index a102f263..10baf2e1 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MlflowModelTagArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MlflowModelTagArgs.java @@ -7,24 +7,26 @@ import com.pulumi.core.annotations.Import; import java.lang.String; import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; public final class MlflowModelTagArgs extends com.pulumi.resources.ResourceArgs { public static final MlflowModelTagArgs Empty = new MlflowModelTagArgs(); - @Import(name="key", required=true) - private Output key; + @Import(name="key") + private @Nullable Output key; - public Output key() { - return this.key; + public Optional> key() { + return Optional.ofNullable(this.key); } - @Import(name="value", required=true) - private Output value; + @Import(name="value") + private @Nullable Output value; - public Output value() { - return this.value; + public Optional> value() { + return Optional.ofNullable(this.value); } private MlflowModelTagArgs() {} @@ -52,7 +54,7 @@ public Builder(MlflowModelTagArgs defaults) { $ = new MlflowModelTagArgs(Objects.requireNonNull(defaults)); } - public Builder key(Output key) { + public Builder key(@Nullable Output key) { $.key = key; return this; } @@ -61,7 +63,7 @@ public Builder key(String key) { return key(Output.of(key)); } - public Builder value(Output value) { + public Builder value(@Nullable Output value) { $.value = value; return this; } @@ -71,8 +73,6 @@ public Builder value(String value) { } public MlflowModelTagArgs build() { - $.key = Objects.requireNonNull($.key, "expected parameter 'key' to be non-null"); - $.value = Objects.requireNonNull($.value, "expected parameter 'value' to be non-null"); return $; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ModelServingConfigServedModelArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ModelServingConfigServedModelArgs.java index 24b37a1c..8c9bcd3b 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ModelServingConfigServedModelArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ModelServingConfigServedModelArgs.java @@ -18,16 +18,32 @@ public final class ModelServingConfigServedModelArgs extends com.pulumi.resource public static final ModelServingConfigServedModelArgs Empty = new ModelServingConfigServedModelArgs(); + /** + * a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. 
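With key and value now optional on MlflowModelTagArgs, build() no longer enforces either field. A sketch with placeholder tag values:

    import com.pulumi.databricks.inputs.MlflowModelTagArgs;

    class MlflowModelTagSketch {
        static MlflowModelTagArgs example() {
            return MlflowModelTagArgs.builder()
                .key("team")          // placeholder tag key
                .value("ml-platform") // placeholder tag value
                .build();
        }
    }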
+ * + */ @Import(name="environmentVars") private @Nullable Output> environmentVars; + /** + * @return a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + * + */ public Optional>> environmentVars() { return Optional.ofNullable(this.environmentVars); } + /** + * ARN of the instance profile that the served model will use to access AWS resources. + * + */ @Import(name="instanceProfileArn") private @Nullable Output instanceProfileArn; + /** + * @return ARN of the instance profile that the served model will use to access AWS resources. + * + */ public Optional> instanceProfileArn() { return Optional.ofNullable(this.instanceProfileArn); } @@ -137,20 +153,44 @@ public Builder(ModelServingConfigServedModelArgs defaults) { $ = new ModelServingConfigServedModelArgs(Objects.requireNonNull(defaults)); } + /** + * @param environmentVars a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + * + * @return builder + * + */ public Builder environmentVars(@Nullable Output> environmentVars) { $.environmentVars = environmentVars; return this; } + /** + * @param environmentVars a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + * + * @return builder + * + */ public Builder environmentVars(Map environmentVars) { return environmentVars(Output.of(environmentVars)); } + /** + * @param instanceProfileArn ARN of the instance profile that the served model will use to access AWS resources. + * + * @return builder + * + */ public Builder instanceProfileArn(@Nullable Output instanceProfileArn) { $.instanceProfileArn = instanceProfileArn; return this; } + /** + * @param instanceProfileArn ARN of the instance profile that the served model will use to access AWS resources. + * + * @return builder + * + */ public Builder instanceProfileArn(String instanceProfileArn) { return instanceProfileArn(Output.of(instanceProfileArn)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsCustomerManagedKeysGcpKeyInfoArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsCustomerManagedKeysGcpKeyInfoArgs.java index 3b2c53f8..ead02310 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsCustomerManagedKeysGcpKeyInfoArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsCustomerManagedKeysGcpKeyInfoArgs.java @@ -13,9 +13,17 @@ public final class MwsCustomerManagedKeysGcpKeyInfoArgs extends com.pulumi.resou public static final MwsCustomerManagedKeysGcpKeyInfoArgs Empty = new MwsCustomerManagedKeysGcpKeyInfoArgs(); + /** + * The GCP KMS key's resource name. + * + */ @Import(name="kmsKeyId", required=true) private Output kmsKeyId; + /** + * @return The GCP KMS key's resource name. + * + */ public Output kmsKeyId() { return this.kmsKeyId; } @@ -44,11 +52,23 @@ public Builder(MwsCustomerManagedKeysGcpKeyInfoArgs defaults) { $ = new MwsCustomerManagedKeysGcpKeyInfoArgs(Objects.requireNonNull(defaults)); } + /** + * @param kmsKeyId The GCP KMS key's resource name. + * + * @return builder + * + */ public Builder kmsKeyId(Output kmsKeyId) { $.kmsKeyId = kmsKeyId; return this; } + /** + * @param kmsKeyId The GCP KMS key's resource name. 
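The served-model inputs document environmentVars and instanceProfileArn above. A sketch that adds a secret-backed environment variable to an existing builder; the secret scope/key and the instance-profile ARN are placeholders, and the required served-model fields are assumed to be set by the caller:

    import java.util.Map;
    import com.pulumi.databricks.inputs.ModelServingConfigServedModelArgs;

    class ServedModelEnvSketch {
        // Adds a secret reference using the documented {{secrets/scope/key}} syntax.
        static ModelServingConfigServedModelArgs.Builder withEnv(ModelServingConfigServedModelArgs.Builder served) {
            return served
                .environmentVars(Map.of("OPENAI_API_KEY", "{{secrets/ml-scope/openai-key}}")) // placeholder scope/key
                .instanceProfileArn("arn:aws:iam::123456789012:instance-profile/model-serving"); // placeholder ARN
        }
    }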
+ * + * @return builder + * + */ public Builder kmsKeyId(String kmsKeyId) { return kmsKeyId(Output.of(kmsKeyId)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsCustomerManagedKeysState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsCustomerManagedKeysState.java index ef0235b1..b24fc676 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsCustomerManagedKeysState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsCustomerManagedKeysState.java @@ -35,14 +35,14 @@ public Optional> accountId() { } /** - * This field is a block and is documented below. + * This field is a block and is documented below. This conflicts with `gcp_key_info` * */ @Import(name="awsKeyInfo") private @Nullable Output awsKeyInfo; /** - * @return This field is a block and is documented below. + * @return This field is a block and is documented below. This conflicts with `gcp_key_info` * */ public Optional> awsKeyInfo() { @@ -79,9 +79,17 @@ public Optional> customerManagedKeyId() { return Optional.ofNullable(this.customerManagedKeyId); } + /** + * This field is a block and is documented below. This conflicts with `aws_key_info` + * + */ @Import(name="gcpKeyInfo") private @Nullable Output gcpKeyInfo; + /** + * @return This field is a block and is documented below. This conflicts with `aws_key_info` + * + */ public Optional> gcpKeyInfo() { return Optional.ofNullable(this.gcpKeyInfo); } @@ -152,7 +160,7 @@ public Builder accountId(String accountId) { } /** - * @param awsKeyInfo This field is a block and is documented below. + * @param awsKeyInfo This field is a block and is documented below. This conflicts with `gcp_key_info` * * @return builder * @@ -163,7 +171,7 @@ public Builder awsKeyInfo(@Nullable Output } /** - * @param awsKeyInfo This field is a block and is documented below. + * @param awsKeyInfo This field is a block and is documented below. This conflicts with `gcp_key_info` * * @return builder * @@ -214,11 +222,23 @@ public Builder customerManagedKeyId(String customerManagedKeyId) { return customerManagedKeyId(Output.of(customerManagedKeyId)); } + /** + * @param gcpKeyInfo This field is a block and is documented below. This conflicts with `aws_key_info` + * + * @return builder + * + */ public Builder gcpKeyInfo(@Nullable Output gcpKeyInfo) { $.gcpKeyInfo = gcpKeyInfo; return this; } + /** + * @param gcpKeyInfo This field is a block and is documented below. This conflicts with `aws_key_info` + * + * @return builder + * + */ public Builder gcpKeyInfo(MwsCustomerManagedKeysGcpKeyInfoArgs gcpKeyInfo) { return gcpKeyInfo(Output.of(gcpKeyInfo)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsWorkspacesState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsWorkspacesState.java index 77bc0ea5..f1f7bf68 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsWorkspacesState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsWorkspacesState.java @@ -248,9 +248,17 @@ public Optional> storageConfigurationId() { return Optional.ofNullable(this.storageConfigurationId); } + /** + * `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + * + */ @Import(name="storageCustomerManagedKeyId") private @Nullable Output storageCustomerManagedKeyId; + /** + * @return `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. 
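gcp_key_info and aws_key_info are documented as mutually exclusive above, so the sketch below sets only the GCP block; the KMS key resource name is a placeholder and the generated builder() entry points are assumed:

    import com.pulumi.databricks.inputs.MwsCustomerManagedKeysGcpKeyInfoArgs;
    import com.pulumi.databricks.inputs.MwsCustomerManagedKeysState;

    class CustomerManagedKeysGcpSketch {
        static MwsCustomerManagedKeysState example() {
            return MwsCustomerManagedKeysState.builder()
                .gcpKeyInfo(MwsCustomerManagedKeysGcpKeyInfoArgs.builder()
                    // placeholder GCP KMS key resource name
                    .kmsKeyId("projects/my-project/locations/us-east1/keyRings/my-ring/cryptoKeys/my-key")
                    .build())
                .build();
        }
    }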
+ * + */ public Optional> storageCustomerManagedKeyId() { return Optional.ofNullable(this.storageCustomerManagedKeyId); } @@ -695,11 +703,23 @@ public Builder storageConfigurationId(String storageConfigurationId) { return storageConfigurationId(Output.of(storageConfigurationId)); } + /** + * @param storageCustomerManagedKeyId `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + * + * @return builder + * + */ public Builder storageCustomerManagedKeyId(@Nullable Output storageCustomerManagedKeyId) { $.storageCustomerManagedKeyId = storageCustomerManagedKeyId; return this; } + /** + * @param storageCustomerManagedKeyId `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + * + * @return builder + * + */ public Builder storageCustomerManagedKeyId(String storageCustomerManagedKeyId) { return storageCustomerManagedKeyId(Output.of(storageCustomerManagedKeyId)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsWorkspacesTokenArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsWorkspacesTokenArgs.java index 77787e03..4b5f20c7 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsWorkspacesTokenArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/MwsWorkspacesTokenArgs.java @@ -23,9 +23,17 @@ public Optional> comment() { return Optional.ofNullable(this.comment); } + /** + * Token expiry lifetime. By default its 2592000 (30 days). + * + */ @Import(name="lifetimeSeconds") private @Nullable Output lifetimeSeconds; + /** + * @return Token expiry lifetime. By default its 2592000 (30 days). + * + */ public Optional> lifetimeSeconds() { return Optional.ofNullable(this.lifetimeSeconds); } @@ -80,11 +88,23 @@ public Builder comment(String comment) { return comment(Output.of(comment)); } + /** + * @param lifetimeSeconds Token expiry lifetime. By default its 2592000 (30 days). + * + * @return builder + * + */ public Builder lifetimeSeconds(@Nullable Output lifetimeSeconds) { $.lifetimeSeconds = lifetimeSeconds; return this; } + /** + * @param lifetimeSeconds Token expiry lifetime. By default its 2592000 (30 days). + * + * @return builder + * + */ public Builder lifetimeSeconds(Integer lifetimeSeconds) { return lifetimeSeconds(Output.of(lifetimeSeconds)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ShareState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ShareState.java index 7f4e2691..85dfa821 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ShareState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ShareState.java @@ -70,6 +70,21 @@ public Optional>> objects() { return Optional.ofNullable(this.objects); } + /** + * User name/group name/sp application_id of the share owner. + * + */ + @Import(name="owner") + private @Nullable Output owner; + + /** + * @return User name/group name/sp application_id of the share owner. + * + */ + public Optional> owner() { + return Optional.ofNullable(this.owner); + } + private ShareState() {} private ShareState(ShareState $) { @@ -77,6 +92,7 @@ private ShareState(ShareState $) { this.createdBy = $.createdBy; this.name = $.name; this.objects = $.objects; + this.owner = $.owner; } public static Builder builder() { @@ -173,6 +189,27 @@ public Builder objects(ShareObjectArgs... 
objects) { return objects(List.of(objects)); } + /** + * @param owner User name/group name/sp application_id of the share owner. + * + * @return builder + * + */ + public Builder owner(@Nullable Output owner) { + $.owner = owner; + return this; + } + + /** + * @param owner User name/group name/sp application_id of the share owner. + * + * @return builder + * + */ + public Builder owner(String owner) { + return owner(Output.of(owner)); + } + public ShareState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlAlertState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlAlertState.java index d7d993cc..41558115 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlAlertState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlAlertState.java @@ -17,6 +17,13 @@ public final class SqlAlertState extends com.pulumi.resources.ResourceArgs { public static final SqlAlertState Empty = new SqlAlertState(); + @Import(name="createdAt") + private @Nullable Output createdAt; + + public Optional> createdAt() { + return Optional.ofNullable(this.createdAt); + } + /** * Name of the alert. * @@ -92,14 +99,23 @@ public Optional> rearm() { return Optional.ofNullable(this.rearm); } + @Import(name="updatedAt") + private @Nullable Output updatedAt; + + public Optional> updatedAt() { + return Optional.ofNullable(this.updatedAt); + } + private SqlAlertState() {} private SqlAlertState(SqlAlertState $) { + this.createdAt = $.createdAt; this.name = $.name; this.options = $.options; this.parent = $.parent; this.queryId = $.queryId; this.rearm = $.rearm; + this.updatedAt = $.updatedAt; } public static Builder builder() { @@ -120,6 +136,15 @@ public Builder(SqlAlertState defaults) { $ = new SqlAlertState(Objects.requireNonNull(defaults)); } + public Builder createdAt(@Nullable Output createdAt) { + $.createdAt = createdAt; + return this; + } + + public Builder createdAt(String createdAt) { + return createdAt(Output.of(createdAt)); + } + /** * @param name Name of the alert. 
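Two of the smaller additions above, sketched with placeholder values: an explicit token lifetime (the documented default is 2592000 seconds) and the new owner field on the share state:

    import com.pulumi.databricks.inputs.MwsWorkspacesTokenArgs;
    import com.pulumi.databricks.inputs.ShareState;

    class TokenAndShareOwnerSketch {
        // Token lifetime in seconds; the provider default is 2592000 (30 days).
        static MwsWorkspacesTokenArgs token() {
            return MwsWorkspacesTokenArgs.builder()
                .lifetimeSeconds(86400) // placeholder: 1 day
                .build();
        }

        // owner is a user name, group name, or service principal application_id.
        static ShareState share() {
            return ShareState.builder()
                .owner("data-platform-admins") // placeholder group name
                .build();
        }
    }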
* @@ -225,6 +250,15 @@ public Builder rearm(Integer rearm) { return rearm(Output.of(rearm)); } + public Builder updatedAt(@Nullable Output updatedAt) { + $.updatedAt = updatedAt; + return this; + } + + public Builder updatedAt(String updatedAt) { + return updatedAt(Output.of(updatedAt)); + } + public SqlAlertState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlDashboardState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlDashboardState.java index 8ec3fa8f..906c69b8 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlDashboardState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlDashboardState.java @@ -16,6 +16,13 @@ public final class SqlDashboardState extends com.pulumi.resources.ResourceArgs { public static final SqlDashboardState Empty = new SqlDashboardState(); + @Import(name="createdAt") + private @Nullable Output createdAt; + + public Optional> createdAt() { + return Optional.ofNullable(this.createdAt); + } + @Import(name="name") private @Nullable Output name; @@ -37,12 +44,21 @@ public Optional>> tags() { return Optional.ofNullable(this.tags); } + @Import(name="updatedAt") + private @Nullable Output updatedAt; + + public Optional> updatedAt() { + return Optional.ofNullable(this.updatedAt); + } + private SqlDashboardState() {} private SqlDashboardState(SqlDashboardState $) { + this.createdAt = $.createdAt; this.name = $.name; this.parent = $.parent; this.tags = $.tags; + this.updatedAt = $.updatedAt; } public static Builder builder() { @@ -63,6 +79,15 @@ public Builder(SqlDashboardState defaults) { $ = new SqlDashboardState(Objects.requireNonNull(defaults)); } + public Builder createdAt(@Nullable Output createdAt) { + $.createdAt = createdAt; + return this; + } + + public Builder createdAt(String createdAt) { + return createdAt(Output.of(createdAt)); + } + public Builder name(@Nullable Output name) { $.name = name; return this; @@ -94,6 +119,15 @@ public Builder tags(String... 
tags) { return tags(List.of(tags)); } + public Builder updatedAt(@Nullable Output updatedAt) { + $.updatedAt = updatedAt; + return this; + } + + public Builder updatedAt(String updatedAt) { + return updatedAt(Output.of(updatedAt)); + } + public SqlDashboardState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlQueryState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlQueryState.java index 440c06e3..9bcfd7df 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlQueryState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlQueryState.java @@ -18,6 +18,13 @@ public final class SqlQueryState extends com.pulumi.resources.ResourceArgs { public static final SqlQueryState Empty = new SqlQueryState(); + @Import(name="createdAt") + private @Nullable Output createdAt; + + public Optional> createdAt() { + return Optional.ofNullable(this.createdAt); + } + @Import(name="dataSourceId") private @Nullable Output dataSourceId; @@ -93,9 +100,17 @@ public Optional>> tags() { return Optional.ofNullable(this.tags); } + @Import(name="updatedAt") + private @Nullable Output updatedAt; + + public Optional> updatedAt() { + return Optional.ofNullable(this.updatedAt); + } + private SqlQueryState() {} private SqlQueryState(SqlQueryState $) { + this.createdAt = $.createdAt; this.dataSourceId = $.dataSourceId; this.description = $.description; this.name = $.name; @@ -105,6 +120,7 @@ private SqlQueryState(SqlQueryState $) { this.runAsRole = $.runAsRole; this.schedule = $.schedule; this.tags = $.tags; + this.updatedAt = $.updatedAt; } public static Builder builder() { @@ -125,6 +141,15 @@ public Builder(SqlQueryState defaults) { $ = new SqlQueryState(Objects.requireNonNull(defaults)); } + public Builder createdAt(@Nullable Output createdAt) { + $.createdAt = createdAt; + return this; + } + + public Builder createdAt(String createdAt) { + return createdAt(Output.of(createdAt)); + } + public Builder dataSourceId(@Nullable Output dataSourceId) { $.dataSourceId = dataSourceId; return this; @@ -230,6 +255,15 @@ public Builder tags(String... tags) { return tags(List.of(tags)); } + public Builder updatedAt(@Nullable Output updatedAt) { + $.updatedAt = updatedAt; + return this; + } + + public Builder updatedAt(String updatedAt) { + return updatedAt(Output.of(updatedAt)); + } + public SqlQueryState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlTableColumnArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlTableColumnArgs.java index 236adf2a..286bc89f 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlTableColumnArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/SqlTableColumnArgs.java @@ -62,18 +62,18 @@ public Optional> nullable() { } /** - * Column type spec (with metadata) as SQL text + * Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. * */ - @Import(name="type", required=true) - private Output type; + @Import(name="type") + private @Nullable Output type; /** - * @return Column type spec (with metadata) as SQL text + * @return Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. 
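Since type is now optional on SqlTableColumnArgs, a column for a VIEW table type can be declared by name alone. The name(String) overload is assumed to follow the usual generated pattern; the column name is a placeholder:

    import com.pulumi.databricks.inputs.SqlTableColumnArgs;

    class ViewColumnSketch {
        // type is omitted here, matching VIEW table types where the column type
        // comes from the view definition; name is still required.
        static SqlTableColumnArgs example() {
            return SqlTableColumnArgs.builder()
                .name("customer_id") // placeholder column name
                .build();
        }
    }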
* */ - public Output type() { - return this.type; + public Optional> type() { + return Optional.ofNullable(this.type); } private SqlTableColumnArgs() {} @@ -167,18 +167,18 @@ public Builder nullable(Boolean nullable) { } /** - * @param type Column type spec (with metadata) as SQL text + * @param type Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. * * @return builder * */ - public Builder type(Output type) { + public Builder type(@Nullable Output type) { $.type = type; return this; } /** - * @param type Column type spec (with metadata) as SQL text + * @param type Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. * * @return builder * @@ -189,7 +189,6 @@ public Builder type(String type) { public SqlTableColumnArgs build() { $.name = Objects.requireNonNull($.name, "expected parameter 'name' to be non-null"); - $.type = Objects.requireNonNull($.type, "expected parameter 'type' to be non-null"); return $; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/StorageCredentialState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/StorageCredentialState.java index 255d2948..8301c412 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/StorageCredentialState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/StorageCredentialState.java @@ -56,6 +56,13 @@ public Optional> databr return Optional.ofNullable(this.databricksGcpServiceAccount); } + @Import(name="forceDestroy") + private @Nullable Output forceDestroy; + + public Optional> forceDestroy() { + return Optional.ofNullable(this.forceDestroy); + } + @Import(name="gcpServiceAccountKey") private @Nullable Output gcpServiceAccountKey; @@ -127,6 +134,7 @@ private StorageCredentialState(StorageCredentialState $) { this.azureServicePrincipal = $.azureServicePrincipal; this.comment = $.comment; this.databricksGcpServiceAccount = $.databricksGcpServiceAccount; + this.forceDestroy = $.forceDestroy; this.gcpServiceAccountKey = $.gcpServiceAccountKey; this.metastoreId = $.metastoreId; this.name = $.name; @@ -197,6 +205,15 @@ public Builder databricksGcpServiceAccount(StorageCredentialDatabricksGcpService return databricksGcpServiceAccount(Output.of(databricksGcpServiceAccount)); } + public Builder forceDestroy(@Nullable Output forceDestroy) { + $.forceDestroy = forceDestroy; + return this; + } + + public Builder forceDestroy(Boolean forceDestroy) { + return forceDestroy(Output.of(forceDestroy)); + } + public Builder gcpServiceAccountKey(@Nullable Output gcpServiceAccountKey) { $.gcpServiceAccountKey = gcpServiceAccountKey; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/AccessControlRuleSetGrantRule.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/AccessControlRuleSetGrantRule.java index 9911364c..c9fb5702 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/AccessControlRuleSetGrantRule.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/AccessControlRuleSetGrantRule.java @@ -20,9 +20,10 @@ public final class AccessControlRuleSetGrantRule { */ private @Nullable List principals; /** - * @return Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + * @return Role to be granted. The supported roles are listed below. 
For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). * * `roles/servicePrincipal.manager` - Manager of a service principal. * * `roles/servicePrincipal.user` - User of a service principal. + * * `roles/group.manager` - Manager of a group. * */ private String role; @@ -39,9 +40,10 @@ public List principals() { return this.principals == null ? List.of() : this.principals; } /** - * @return Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + * @return Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). * * `roles/servicePrincipal.manager` - Manager of a service principal. * * `roles/servicePrincipal.user` - User of a service principal. + * * `roles/group.manager` - Manager of a group. * */ public String role() { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/ExternalLocationEncryptionDetails.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/ExternalLocationEncryptionDetails.java new file mode 100644 index 00000000..337d929b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/ExternalLocationEncryptionDetails.java @@ -0,0 +1,48 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.databricks.outputs.ExternalLocationEncryptionDetailsSseEncryptionDetails; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class ExternalLocationEncryptionDetails { + private @Nullable ExternalLocationEncryptionDetailsSseEncryptionDetails sseEncryptionDetails; + + private ExternalLocationEncryptionDetails() {} + public Optional sseEncryptionDetails() { + return Optional.ofNullable(this.sseEncryptionDetails); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ExternalLocationEncryptionDetails defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable ExternalLocationEncryptionDetailsSseEncryptionDetails sseEncryptionDetails; + public Builder() {} + public Builder(ExternalLocationEncryptionDetails defaults) { + Objects.requireNonNull(defaults); + this.sseEncryptionDetails = defaults.sseEncryptionDetails; + } + + @CustomType.Setter + public Builder sseEncryptionDetails(@Nullable ExternalLocationEncryptionDetailsSseEncryptionDetails sseEncryptionDetails) { + this.sseEncryptionDetails = sseEncryptionDetails; + return this; + } + public ExternalLocationEncryptionDetails build() { + final var o = new ExternalLocationEncryptionDetails(); + o.sseEncryptionDetails = sseEncryptionDetails; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/ExternalLocationEncryptionDetailsSseEncryptionDetails.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/ExternalLocationEncryptionDetailsSseEncryptionDetails.java new file mode 100644 index 00000000..a6730bc9 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/ExternalLocationEncryptionDetailsSseEncryptionDetails.java @@ -0,0 +1,60 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
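The new encryption-details output types above expose plain builders. A sketch of their shape; the algorithm value and KMS key ARN are assumed placeholders, not values confirmed by this patch:

    import com.pulumi.databricks.outputs.ExternalLocationEncryptionDetails;
    import com.pulumi.databricks.outputs.ExternalLocationEncryptionDetailsSseEncryptionDetails;

    class SseEncryptionSketch {
        static ExternalLocationEncryptionDetails example() {
            ExternalLocationEncryptionDetailsSseEncryptionDetails sse =
                ExternalLocationEncryptionDetailsSseEncryptionDetails.builder()
                    .algorithm("AWS_SSE_KMS") // assumed algorithm name
                    .awsKmsKeyArn("arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000") // placeholder
                    .build();
            return ExternalLocationEncryptionDetails.builder()
                .sseEncryptionDetails(sse)
                .build();
        }
    }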
*** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class ExternalLocationEncryptionDetailsSseEncryptionDetails { + private @Nullable String algorithm; + private @Nullable String awsKmsKeyArn; + + private ExternalLocationEncryptionDetailsSseEncryptionDetails() {} + public Optional algorithm() { + return Optional.ofNullable(this.algorithm); + } + public Optional awsKmsKeyArn() { + return Optional.ofNullable(this.awsKmsKeyArn); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ExternalLocationEncryptionDetailsSseEncryptionDetails defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String algorithm; + private @Nullable String awsKmsKeyArn; + public Builder() {} + public Builder(ExternalLocationEncryptionDetailsSseEncryptionDetails defaults) { + Objects.requireNonNull(defaults); + this.algorithm = defaults.algorithm; + this.awsKmsKeyArn = defaults.awsKmsKeyArn; + } + + @CustomType.Setter + public Builder algorithm(@Nullable String algorithm) { + this.algorithm = algorithm; + return this; + } + @CustomType.Setter + public Builder awsKmsKeyArn(@Nullable String awsKmsKeyArn) { + this.awsKmsKeyArn = awsKmsKeyArn; + return this; + } + public ExternalLocationEncryptionDetailsSseEncryptionDetails build() { + final var o = new ExternalLocationEncryptionDetailsSseEncryptionDetails(); + o.algorithm = algorithm; + o.awsKmsKeyArn = awsKmsKeyArn; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentUserResult.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentUserResult.java index 147fa1bd..8bf54de2 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentUserResult.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetCurrentUserResult.java @@ -9,6 +9,7 @@ @CustomType public final class GetCurrentUserResult { + private String aclPrincipalId; private String alphanumeric; private String externalId; private String home; @@ -22,6 +23,9 @@ public final class GetCurrentUserResult { private String workspaceUrl; private GetCurrentUserResult() {} + public String aclPrincipalId() { + return this.aclPrincipalId; + } public String alphanumeric() { return this.alphanumeric; } @@ -57,6 +61,7 @@ public static Builder builder(GetCurrentUserResult defaults) { } @CustomType.Builder public static final class Builder { + private String aclPrincipalId; private String alphanumeric; private String externalId; private String home; @@ -67,6 +72,7 @@ public static final class Builder { public Builder() {} public Builder(GetCurrentUserResult defaults) { Objects.requireNonNull(defaults); + this.aclPrincipalId = defaults.aclPrincipalId; this.alphanumeric = defaults.alphanumeric; this.externalId = defaults.externalId; this.home = defaults.home; @@ -76,6 +82,11 @@ public Builder(GetCurrentUserResult defaults) { this.workspaceUrl = defaults.workspaceUrl; } + @CustomType.Setter + public Builder aclPrincipalId(String aclPrincipalId) { + this.aclPrincipalId = Objects.requireNonNull(aclPrincipalId); + return this; + } @CustomType.Setter public Builder alphanumeric(String alphanumeric) { this.alphanumeric = Objects.requireNonNull(alphanumeric); @@ -113,6 +124,7 @@ public Builder workspaceUrl(String workspaceUrl) { } public 
GetCurrentUserResult build() { final var o = new GetCurrentUserResult(); + o.aclPrincipalId = aclPrincipalId; o.alphanumeric = alphanumeric; o.externalId = externalId; o.home = home; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetGroupResult.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetGroupResult.java index 972ceede..277c6087 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetGroupResult.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetGroupResult.java @@ -13,6 +13,11 @@ @CustomType public final class GetGroupResult { + /** + * @return identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + * + */ + private String aclPrincipalId; /** * @return True if group members can create clusters * @@ -71,6 +76,13 @@ public final class GetGroupResult { private @Nullable Boolean workspaceAccess; private GetGroupResult() {} + /** + * @return identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + * + */ + public String aclPrincipalId() { + return this.aclPrincipalId; + } /** * @return True if group members can create clusters * @@ -165,6 +177,7 @@ public static Builder builder(GetGroupResult defaults) { } @CustomType.Builder public static final class Builder { + private String aclPrincipalId; private @Nullable Boolean allowClusterCreate; private @Nullable Boolean allowInstancePoolCreate; private List childGroups; @@ -182,6 +195,7 @@ public static final class Builder { public Builder() {} public Builder(GetGroupResult defaults) { Objects.requireNonNull(defaults); + this.aclPrincipalId = defaults.aclPrincipalId; this.allowClusterCreate = defaults.allowClusterCreate; this.allowInstancePoolCreate = defaults.allowInstancePoolCreate; this.childGroups = defaults.childGroups; @@ -198,6 +212,11 @@ public Builder(GetGroupResult defaults) { this.workspaceAccess = defaults.workspaceAccess; } + @CustomType.Setter + public Builder aclPrincipalId(String aclPrincipalId) { + this.aclPrincipalId = Objects.requireNonNull(aclPrincipalId); + return this; + } @CustomType.Setter public Builder allowClusterCreate(@Nullable Boolean allowClusterCreate) { this.allowClusterCreate = allowClusterCreate; @@ -288,6 +307,7 @@ public Builder workspaceAccess(@Nullable Boolean workspaceAccess) { } public GetGroupResult build() { final var o = new GetGroupResult(); + o.aclPrincipalId = aclPrincipalId; o.allowClusterCreate = allowClusterCreate; o.allowInstancePoolCreate = allowInstancePoolCreate; o.childGroups = childGroups; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsRunJobTask.java index 01672a81..15bbf37f 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsRunJobTask.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsRunJobTask.java @@ -4,6 +4,7 @@ package com.pulumi.databricks.outputs; import com.pulumi.core.annotations.CustomType; +import java.lang.Integer; import java.lang.Object; import java.lang.String; import java.util.Map; @@ -12,11 +13,11 @@ @CustomType public final class GetJobJobSettingsSettingsRunJobTask { - private String jobId; + private Integer jobId; private @Nullable Map jobParameters; private GetJobJobSettingsSettingsRunJobTask() {} - public String jobId() { + public Integer jobId() { return this.jobId; } public Map jobParameters() { @@ -32,7 +33,7 @@ public static 
Builder builder(GetJobJobSettingsSettingsRunJobTask defaults) { } @CustomType.Builder public static final class Builder { - private String jobId; + private Integer jobId; private @Nullable Map jobParameters; public Builder() {} public Builder(GetJobJobSettingsSettingsRunJobTask defaults) { @@ -42,7 +43,7 @@ public Builder(GetJobJobSettingsSettingsRunJobTask defaults) { } @CustomType.Setter - public Builder jobId(String jobId) { + public Builder jobId(Integer jobId) { this.jobId = Objects.requireNonNull(jobId); return this; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskRunJobTask.java index 24857b52..eac7dac9 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskRunJobTask.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskRunJobTask.java @@ -4,6 +4,7 @@ package com.pulumi.databricks.outputs; import com.pulumi.core.annotations.CustomType; +import java.lang.Integer; import java.lang.Object; import java.lang.String; import java.util.Map; @@ -12,11 +13,11 @@ @CustomType public final class GetJobJobSettingsSettingsTaskRunJobTask { - private String jobId; + private Integer jobId; private @Nullable Map jobParameters; private GetJobJobSettingsSettingsTaskRunJobTask() {} - public String jobId() { + public Integer jobId() { return this.jobId; } public Map jobParameters() { @@ -32,7 +33,7 @@ public static Builder builder(GetJobJobSettingsSettingsTaskRunJobTask defaults) } @CustomType.Builder public static final class Builder { - private String jobId; + private Integer jobId; private @Nullable Map jobParameters; public Builder() {} public Builder(GetJobJobSettingsSettingsTaskRunJobTask defaults) { @@ -42,7 +43,7 @@ public Builder(GetJobJobSettingsSettingsTaskRunJobTask defaults) { } @CustomType.Setter - public Builder jobId(String jobId) { + public Builder jobId(Integer jobId) { this.jobId = Objects.requireNonNull(jobId); return this; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetServicePrincipalResult.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetServicePrincipalResult.java index 6549b67d..57d2fd81 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetServicePrincipalResult.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetServicePrincipalResult.java @@ -10,6 +10,11 @@ @CustomType public final class GetServicePrincipalResult { + /** + * @return identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + * + */ + private String aclPrincipalId; /** * @return Whether service principal is active or not. * @@ -44,6 +49,13 @@ public final class GetServicePrincipalResult { private String spId; private GetServicePrincipalResult() {} + /** + * @return identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + * + */ + public String aclPrincipalId() { + return this.aclPrincipalId; + } /** * @return Whether service principal is active or not. 
* @@ -102,6 +114,7 @@ public static Builder builder(GetServicePrincipalResult defaults) { } @CustomType.Builder public static final class Builder { + private String aclPrincipalId; private Boolean active; private String applicationId; private String displayName; @@ -113,6 +126,7 @@ public static final class Builder { public Builder() {} public Builder(GetServicePrincipalResult defaults) { Objects.requireNonNull(defaults); + this.aclPrincipalId = defaults.aclPrincipalId; this.active = defaults.active; this.applicationId = defaults.applicationId; this.displayName = defaults.displayName; @@ -123,6 +137,11 @@ public Builder(GetServicePrincipalResult defaults) { this.spId = defaults.spId; } + @CustomType.Setter + public Builder aclPrincipalId(String aclPrincipalId) { + this.aclPrincipalId = Objects.requireNonNull(aclPrincipalId); + return this; + } @CustomType.Setter public Builder active(Boolean active) { this.active = Objects.requireNonNull(active); @@ -165,6 +184,7 @@ public Builder spId(String spId) { } public GetServicePrincipalResult build() { final var o = new GetServicePrincipalResult(); + o.aclPrincipalId = aclPrincipalId; o.active = active; o.applicationId = applicationId; o.displayName = displayName; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetUserResult.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetUserResult.java index 38a4a8c5..0f3a03c9 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetUserResult.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetUserResult.java @@ -11,6 +11,11 @@ @CustomType public final class GetUserResult { + /** + * @return identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com`. + * + */ + private String aclPrincipalId; /** * @return Alphanumeric representation of user local name. e.g. `mr_foo`. * @@ -50,6 +55,13 @@ public final class GetUserResult { private @Nullable String userName; private GetUserResult() {} + /** + * @return identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com`. + * + */ + public String aclPrincipalId() { + return this.aclPrincipalId; + } /** * @return Alphanumeric representation of user local name. e.g. `mr_foo`. 
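All four identity data-source results gain aclPrincipalId, as shown above. A sketch that collects the new identifiers (for example "groups/Some Group" or "users/mr.foo@example.com") for use as access-control rule-set principals:

    import java.util.List;
    import com.pulumi.databricks.outputs.GetGroupResult;
    import com.pulumi.databricks.outputs.GetServicePrincipalResult;
    import com.pulumi.databricks.outputs.GetUserResult;

    class AclPrincipalIdSketch {
        // Gathers the aclPrincipalId of a group, service principal, and user.
        static List<String> principals(GetGroupResult group, GetServicePrincipalResult sp, GetUserResult user) {
            return List.of(group.aclPrincipalId(), sp.aclPrincipalId(), user.aclPrincipalId());
        }
    }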
* @@ -115,6 +127,7 @@ public static Builder builder(GetUserResult defaults) { } @CustomType.Builder public static final class Builder { + private String aclPrincipalId; private String alphanumeric; private String applicationId; private String displayName; @@ -127,6 +140,7 @@ public static final class Builder { public Builder() {} public Builder(GetUserResult defaults) { Objects.requireNonNull(defaults); + this.aclPrincipalId = defaults.aclPrincipalId; this.alphanumeric = defaults.alphanumeric; this.applicationId = defaults.applicationId; this.displayName = defaults.displayName; @@ -138,6 +152,11 @@ public Builder(GetUserResult defaults) { this.userName = defaults.userName; } + @CustomType.Setter + public Builder aclPrincipalId(String aclPrincipalId) { + this.aclPrincipalId = Objects.requireNonNull(aclPrincipalId); + return this; + } @CustomType.Setter public Builder alphanumeric(String alphanumeric) { this.alphanumeric = Objects.requireNonNull(alphanumeric); @@ -185,6 +204,7 @@ public Builder userName(@Nullable String userName) { } public GetUserResult build() { final var o = new GetUserResult(); + o.aclPrincipalId = aclPrincipalId; o.alphanumeric = alphanumeric; o.applicationId = applicationId; o.displayName = displayName; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobRunJobTask.java index 4e700739..b09f3d1a 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobRunJobTask.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobRunJobTask.java @@ -4,6 +4,7 @@ package com.pulumi.databricks.outputs; import com.pulumi.core.annotations.CustomType; +import java.lang.Integer; import java.lang.Object; import java.lang.String; import java.util.Map; @@ -16,7 +17,7 @@ public final class JobRunJobTask { * @return (String) ID of the job * */ - private String jobId; + private Integer jobId; /** * @return (Map) Job parameters for the task * @@ -28,7 +29,7 @@ private JobRunJobTask() {} * @return (String) ID of the job * */ - public String jobId() { + public Integer jobId() { return this.jobId; } /** @@ -48,7 +49,7 @@ public static Builder builder(JobRunJobTask defaults) { } @CustomType.Builder public static final class Builder { - private String jobId; + private Integer jobId; private @Nullable Map jobParameters; public Builder() {} public Builder(JobRunJobTask defaults) { @@ -58,7 +59,7 @@ public Builder(JobRunJobTask defaults) { } @CustomType.Setter - public Builder jobId(String jobId) { + public Builder jobId(Integer jobId) { this.jobId = Objects.requireNonNull(jobId); return this; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskRunJobTask.java index 91f9e2b3..2da65eb2 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskRunJobTask.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskRunJobTask.java @@ -4,6 +4,7 @@ package com.pulumi.databricks.outputs; import com.pulumi.core.annotations.CustomType; +import java.lang.Integer; import java.lang.Object; import java.lang.String; import java.util.Map; @@ -16,7 +17,7 @@ public final class JobTaskRunJobTask { * @return (String) ID of the job * */ - private String jobId; + private Integer jobId; /** * @return (Map) Job parameters for the task * @@ -28,7 +29,7 @@ private JobTaskRunJobTask() {} * @return (String) ID of the job * */ - public String jobId() { + public Integer jobId() { return 
this.jobId; } /** @@ -48,7 +49,7 @@ public static Builder builder(JobTaskRunJobTask defaults) { } @CustomType.Builder public static final class Builder { - private String jobId; + private Integer jobId; private @Nullable Map jobParameters; public Builder() {} public Builder(JobTaskRunJobTask defaults) { @@ -58,7 +59,7 @@ public Builder(JobTaskRunJobTask defaults) { } @CustomType.Setter - public Builder jobId(String jobId) { + public Builder jobId(Integer jobId) { this.jobId = Objects.requireNonNull(jobId); return this; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessAwsIamRole.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessAwsIamRole.java index 98fea724..c81f93e2 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessAwsIamRole.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessAwsIamRole.java @@ -12,7 +12,7 @@ public final class MetastoreDataAccessAwsIamRole { /** * @return The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` * - * `azure_service_principal` optional configuration block for credential details for Azure: + * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): * */ private String roleArn; @@ -21,7 +21,7 @@ private MetastoreDataAccessAwsIamRole() {} /** * @return The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` * - * `azure_service_principal` optional configuration block for credential details for Azure: + * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): * */ public String roleArn() { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessAzureServicePrincipal.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessAzureServicePrincipal.java index e10d0367..6ae504f2 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessAzureServicePrincipal.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessAzureServicePrincipal.java @@ -17,8 +17,6 @@ public final class MetastoreDataAccessAzureServicePrincipal { /** * @return The client secret generated for the above app ID in AAD. **This field is redacted on output** * - * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: - * */ private String clientSecret; /** @@ -38,8 +36,6 @@ public String applicationId() { /** * @return The client secret generated for the above app ID in AAD. 
**This field is redacted on output** * - * `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: - * */ public String clientSecret() { return this.clientSecret; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessDatabricksGcpServiceAccount.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessDatabricksGcpServiceAccount.java index 3bc1f115..9dea9d2b 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessDatabricksGcpServiceAccount.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessDatabricksGcpServiceAccount.java @@ -14,6 +14,8 @@ public final class MetastoreDataAccessDatabricksGcpServiceAccount { /** * @return The email of the GCP service account created, to be granted access to relevant buckets. * + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * */ private @Nullable String email; @@ -21,6 +23,8 @@ private MetastoreDataAccessDatabricksGcpServiceAccount() {} /** * @return The email of the GCP service account created, to be granted access to relevant buckets. * + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * */ public Optional email() { return Optional.ofNullable(this.email); diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessGcpServiceAccountKey.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessGcpServiceAccountKey.java index b1160d38..b5d80399 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessGcpServiceAccountKey.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MetastoreDataAccessGcpServiceAccountKey.java @@ -12,6 +12,8 @@ public final class MetastoreDataAccessGcpServiceAccountKey { /** * @return The email of the GCP service account created, to be granted access to relevant buckets. * + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * */ private String email; private String privateKey; @@ -21,6 +23,8 @@ private MetastoreDataAccessGcpServiceAccountKey() {} /** * @return The email of the GCP service account created, to be granted access to relevant buckets. 
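email is optional on the Databricks-managed GCP service account output above, so reads should guard against it being unset. A minimal sketch with a placeholder fallback string:

    import com.pulumi.databricks.outputs.MetastoreDataAccessDatabricksGcpServiceAccount;

    class GcpServiceAccountEmailSketch {
        static String email(MetastoreDataAccessDatabricksGcpServiceAccount account) {
            return account.email().orElse("(service account not provisioned yet)");
        }
    }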
* + * `azure_service_principal` optional configuration block for credential details for Azure (Legacy): + * */ public String email() { return this.email; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MlflowModelTag.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MlflowModelTag.java index 4a9d8cc7..be94031c 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MlflowModelTag.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MlflowModelTag.java @@ -6,18 +6,20 @@ import com.pulumi.core.annotations.CustomType; import java.lang.String; import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; @CustomType public final class MlflowModelTag { - private String key; - private String value; + private @Nullable String key; + private @Nullable String value; private MlflowModelTag() {} - public String key() { - return this.key; + public Optional key() { + return Optional.ofNullable(this.key); } - public String value() { - return this.value; + public Optional value() { + return Optional.ofNullable(this.value); } public static Builder builder() { @@ -29,8 +31,8 @@ public static Builder builder(MlflowModelTag defaults) { } @CustomType.Builder public static final class Builder { - private String key; - private String value; + private @Nullable String key; + private @Nullable String value; public Builder() {} public Builder(MlflowModelTag defaults) { Objects.requireNonNull(defaults); @@ -39,13 +41,13 @@ public Builder(MlflowModelTag defaults) { } @CustomType.Setter - public Builder key(String key) { - this.key = Objects.requireNonNull(key); + public Builder key(@Nullable String key) { + this.key = key; return this; } @CustomType.Setter - public Builder value(String value) { - this.value = Objects.requireNonNull(value); + public Builder value(@Nullable String value) { + this.value = value; return this; } public MlflowModelTag build() { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/ModelServingConfigServedModel.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/ModelServingConfigServedModel.java index b285a5c5..99d51332 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/ModelServingConfigServedModel.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/ModelServingConfigServedModel.java @@ -14,7 +14,15 @@ @CustomType public final class ModelServingConfigServedModel { + /** + * @return a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + * + */ private @Nullable Map environmentVars; + /** + * @return ARN of the instance profile that the served model will use to access AWS resources. + * + */ private @Nullable String instanceProfileArn; /** * @return The name of the model in Databricks Model Registry to be served. @@ -43,9 +51,17 @@ public final class ModelServingConfigServedModel { private String workloadSize; private ModelServingConfigServedModel() {} + /** + * @return a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + * + */ public Map environmentVars() { return this.environmentVars == null ? Map.of() : this.environmentVars; } + /** + * @return ARN of the instance profile that the served model will use to access AWS resources. 
+ * + */ public Optional instanceProfileArn() { return Optional.ofNullable(this.instanceProfileArn); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MwsCustomerManagedKeysGcpKeyInfo.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MwsCustomerManagedKeysGcpKeyInfo.java index 5d0a1674..91211956 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MwsCustomerManagedKeysGcpKeyInfo.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MwsCustomerManagedKeysGcpKeyInfo.java @@ -9,9 +9,17 @@ @CustomType public final class MwsCustomerManagedKeysGcpKeyInfo { + /** + * @return The GCP KMS key's resource name. + * + */ private String kmsKeyId; private MwsCustomerManagedKeysGcpKeyInfo() {} + /** + * @return The GCP KMS key's resource name. + * + */ public String kmsKeyId() { return this.kmsKeyId; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MwsWorkspacesToken.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MwsWorkspacesToken.java index 2fb5d314..a6e10f1c 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/MwsWorkspacesToken.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/MwsWorkspacesToken.java @@ -13,6 +13,10 @@ @CustomType public final class MwsWorkspacesToken { private @Nullable String comment; + /** + * @return Token expiry lifetime. By default its 2592000 (30 days). + * + */ private @Nullable Integer lifetimeSeconds; private @Nullable String tokenId; private @Nullable String tokenValue; @@ -21,6 +25,10 @@ private MwsWorkspacesToken() {} public Optional comment() { return Optional.ofNullable(this.comment); } + /** + * @return Token expiry lifetime. By default its 2592000 (30 days). + * + */ public Optional lifetimeSeconds() { return Optional.ofNullable(this.lifetimeSeconds); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlTableColumn.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlTableColumn.java index 348a6c26..72e137d0 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlTableColumn.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/SqlTableColumn.java @@ -28,10 +28,10 @@ public final class SqlTableColumn { */ private @Nullable Boolean nullable; /** - * @return Column type spec (with metadata) as SQL text + * @return Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. * */ - private String type; + private @Nullable String type; private SqlTableColumn() {} /** @@ -56,11 +56,11 @@ public Optional nullable() { return Optional.ofNullable(this.nullable); } /** - * @return Column type spec (with metadata) as SQL text + * @return Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. 
* */ - public String type() { - return this.type; + public Optional type() { + return Optional.ofNullable(this.type); } public static Builder builder() { @@ -75,7 +75,7 @@ public static final class Builder { private @Nullable String comment; private String name; private @Nullable Boolean nullable; - private String type; + private @Nullable String type; public Builder() {} public Builder(SqlTableColumn defaults) { Objects.requireNonNull(defaults); @@ -101,8 +101,8 @@ public Builder nullable(@Nullable Boolean nullable) { return this; } @CustomType.Setter - public Builder type(String type) { - this.type = Objects.requireNonNull(type); + public Builder type(@Nullable String type) { + this.type = type; return this; } public SqlTableColumn build() { diff --git a/sdk/nodejs/accessControlRuleSet.ts b/sdk/nodejs/accessControlRuleSet.ts index 09374162..1bf61c6b 100644 --- a/sdk/nodejs/accessControlRuleSet.ts +++ b/sdk/nodejs/accessControlRuleSet.ts @@ -9,10 +9,128 @@ import * as utilities from "./utilities"; /** * This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. * - * > **Note** Currently, we only support managing access rules on service principal resources through `databricks.AccessControlRuleSet`. + * > **Note** Currently, we only support managing access rules on service principal, group and account resources through `databricks.AccessControlRuleSet`. * * > **Warning** `databricks.AccessControlRuleSet` cannot be used to manage access rules for resources supported by databricks_permissions. Refer to its documentation for more information. * + * ## Service principal rule set usage + * + * Through a Databricks workspace: + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as databricks from "@pulumi/databricks"; + * + * const accountId = "00000000-0000-0000-0000-000000000000"; + * const ds = databricks.getGroup({ + * displayName: "Data Science", + * }); + * const automationSp = new databricks.ServicePrincipal("automationSp", {displayName: "SP_FOR_AUTOMATION"}); + * const automationSpRuleSet = new databricks.AccessControlRuleSet("automationSpRuleSet", {grantRules: [{ + * principals: [ds.then(ds => ds.aclPrincipalId)], + * role: "roles/servicePrincipal.user", + * }]}); + * ``` + * + * Through AWS Databricks account: + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as databricks from "@pulumi/databricks"; + * + * const accountId = "00000000-0000-0000-0000-000000000000"; + * // account level group creation + * const ds = new databricks.Group("ds", {}); + * const automationSp = new databricks.ServicePrincipal("automationSp", {displayName: "SP_FOR_AUTOMATION"}); + * const automationSpRuleSet = new databricks.AccessControlRuleSet("automationSpRuleSet", {grantRules: [{ + * principals: [ds.aclPrincipalId], + * role: "roles/servicePrincipal.user", + * }]}); + * ``` + * + * Through Azure Databricks account: + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as databricks from "@pulumi/databricks"; + * + * const accountId = "00000000-0000-0000-0000-000000000000"; + * // account level group creation + * const ds = new databricks.Group("ds", {}); + * const automationSp = new databricks.ServicePrincipal("automationSp", { + * applicationId: "00000000-0000-0000-0000-000000000000", + * displayName: "SP_FOR_AUTOMATION", + * }); + * const automationSpRuleSet = new 
databricks.AccessControlRuleSet("automationSpRuleSet", {grantRules: [{ + * principals: [ds.aclPrincipalId], + * role: "roles/servicePrincipal.user", + * }]}); + * ``` + * + * Through GCP Databricks account: + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as databricks from "@pulumi/databricks"; + * + * const accountId = "00000000-0000-0000-0000-000000000000"; + * // account level group creation + * const ds = new databricks.Group("ds", {}); + * const automationSp = new databricks.ServicePrincipal("automationSp", {displayName: "SP_FOR_AUTOMATION"}); + * const automationSpRuleSet = new databricks.AccessControlRuleSet("automationSpRuleSet", {grantRules: [{ + * principals: [ds.aclPrincipalId], + * role: "roles/servicePrincipal.user", + * }]}); + * ``` + * + * ## Group rule set usage + * + * Refer to the appropriate provider configuration as shown in the examples for service principal rule set. + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as databricks from "@pulumi/databricks"; + * + * const accountId = "00000000-0000-0000-0000-000000000000"; + * const ds = databricks.getGroup({ + * displayName: "Data Science", + * }); + * const john = databricks.getUser({ + * userName: "john.doe@example.com", + * }); + * const dsGroupRuleSet = new databricks.AccessControlRuleSet("dsGroupRuleSet", {grantRules: [{ + * principals: [john.then(john => john.aclPrincipalId)], + * role: "roles/group.manager", + * }]}); + * ``` + * + * ## Account rule set usage + * + * Refer to the appropriate provider configuration as shown in the examples for service principal rule set. + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as databricks from "@pulumi/databricks"; + * + * const accountId = "00000000-0000-0000-0000-000000000000"; + * const ds = databricks.getGroup({ + * displayName: "Data Science", + * }); + * const john = databricks.getUser({ + * userName: "john.doe@example.com", + * }); + * const accountRuleSet = new databricks.AccessControlRuleSet("accountRuleSet", {grantRules: [ + * { + * principals: [john.then(john => john.aclPrincipalId)], + * role: "roles/group.manager", + * }, + * { + * principals: [data.databricks_user.ds.acl_principal_id], + * role: "roles/servicePrincipal.manager", + * }, + * ]}); + * ``` + * * ## Related Resources * * The following resources are often used in the same context: @@ -59,6 +177,8 @@ export class AccessControlRuleSet extends pulumi.CustomResource { /** * Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` */ public readonly name!: pulumi.Output; @@ -103,6 +223,8 @@ export interface AccessControlRuleSetState { /** * Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` */ name?: pulumi.Input; } @@ -120,6 +242,8 @@ export interface AccessControlRuleSetArgs { /** * Unique identifier of a rule set. 
The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * * `accounts/{account_id}/ruleSets/default` */ name?: pulumi.Input; } diff --git a/sdk/nodejs/catalog.ts b/sdk/nodejs/catalog.ts index 8d204690..62211912 100644 --- a/sdk/nodejs/catalog.ts +++ b/sdk/nodejs/catalog.ts @@ -67,6 +67,10 @@ export class Catalog extends pulumi.CustomResource { * User-supplied free-form text. */ public readonly comment!: pulumi.Output; + /** + * For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + */ + public readonly connectionName!: pulumi.Output; /** * Delete catalog regardless of its contents. */ @@ -115,6 +119,7 @@ export class Catalog extends pulumi.CustomResource { if (opts.id) { const state = argsOrState as CatalogState | undefined; resourceInputs["comment"] = state ? state.comment : undefined; + resourceInputs["connectionName"] = state ? state.connectionName : undefined; resourceInputs["forceDestroy"] = state ? state.forceDestroy : undefined; resourceInputs["isolationMode"] = state ? state.isolationMode : undefined; resourceInputs["metastoreId"] = state ? state.metastoreId : undefined; @@ -127,6 +132,7 @@ export class Catalog extends pulumi.CustomResource { } else { const args = argsOrState as CatalogArgs | undefined; resourceInputs["comment"] = args ? args.comment : undefined; + resourceInputs["connectionName"] = args ? args.connectionName : undefined; resourceInputs["forceDestroy"] = args ? args.forceDestroy : undefined; resourceInputs["isolationMode"] = args ? args.isolationMode : undefined; resourceInputs["metastoreId"] = args ? args.metastoreId : undefined; @@ -150,6 +156,10 @@ export interface CatalogState { * User-supplied free-form text. */ comment?: pulumi.Input; + /** + * For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + */ + connectionName?: pulumi.Input; /** * Delete catalog regardless of its contents. */ @@ -193,6 +203,10 @@ export interface CatalogArgs { * User-supplied free-form text. */ comment?: pulumi.Input; + /** + * For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + */ + connectionName?: pulumi.Input; /** * Delete catalog regardless of its contents. */ diff --git a/sdk/nodejs/connection.ts b/sdk/nodejs/connection.ts new file mode 100644 index 00000000..22699b05 --- /dev/null +++ b/sdk/nodejs/connection.ts @@ -0,0 +1,206 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as utilities from "./utilities"; + +/** + * Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: + * + * - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. 
+ * - A foreign catalog + * + * This resource manages connections in Unity Catalog + * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as databricks from "@pulumi/databricks"; + * + * const mysql = new databricks.Connection("mysql", { + * comment: "this is a connection to mysql db", + * connectionType: "MYSQL", + * options: { + * host: "test.mysql.database.azure.com", + * password: "password", + * port: "3306", + * user: "user", + * }, + * properties: { + * purpose: "testing", + * }, + * }); + * ``` + * + * ## Import + * + * This resource can be imported by `name` bash + * + * ```sh + * $ pulumi import databricks:index/connection:Connection this + * ``` + */ +export class Connection extends pulumi.CustomResource { + /** + * Get an existing Connection resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state Any extra arguments used during the lookup. + * @param opts Optional settings to control the behavior of the CustomResource. + */ + public static get(name: string, id: pulumi.Input, state?: ConnectionState, opts?: pulumi.CustomResourceOptions): Connection { + return new Connection(name, state, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'databricks:index/connection:Connection'; + + /** + * Returns true if the given object is an instance of Connection. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. + */ + public static isInstance(obj: any): obj is Connection { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === Connection.__pulumiType; + } + + /** + * Free-form text. + */ + public readonly comment!: pulumi.Output; + /** + * Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + */ + public readonly connectionType!: pulumi.Output; + public readonly metastoreId!: pulumi.Output; + /** + * Name of the Connection. + */ + public readonly name!: pulumi.Output; + /** + * The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + */ + public readonly options!: pulumi.Output<{[key: string]: any}>; + /** + * Name of the connection owner. + */ + public readonly owner!: pulumi.Output; + /** + * Free-form connection properties. + */ + public readonly properties!: pulumi.Output<{[key: string]: any} | undefined>; + public readonly readOnly!: pulumi.Output; + + /** + * Create a Connection resource with the given unique name, arguments, and options. + * + * @param name The _unique_ name of the resource. + * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. + */ + constructor(name: string, args: ConnectionArgs, opts?: pulumi.CustomResourceOptions) + constructor(name: string, argsOrState?: ConnectionArgs | ConnectionState, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (opts.id) { + const state = argsOrState as ConnectionState | undefined; + resourceInputs["comment"] = state ? 
state.comment : undefined; + resourceInputs["connectionType"] = state ? state.connectionType : undefined; + resourceInputs["metastoreId"] = state ? state.metastoreId : undefined; + resourceInputs["name"] = state ? state.name : undefined; + resourceInputs["options"] = state ? state.options : undefined; + resourceInputs["owner"] = state ? state.owner : undefined; + resourceInputs["properties"] = state ? state.properties : undefined; + resourceInputs["readOnly"] = state ? state.readOnly : undefined; + } else { + const args = argsOrState as ConnectionArgs | undefined; + if ((!args || args.connectionType === undefined) && !opts.urn) { + throw new Error("Missing required property 'connectionType'"); + } + if ((!args || args.options === undefined) && !opts.urn) { + throw new Error("Missing required property 'options'"); + } + resourceInputs["comment"] = args ? args.comment : undefined; + resourceInputs["connectionType"] = args ? args.connectionType : undefined; + resourceInputs["metastoreId"] = args ? args.metastoreId : undefined; + resourceInputs["name"] = args ? args.name : undefined; + resourceInputs["options"] = args?.options ? pulumi.secret(args.options) : undefined; + resourceInputs["owner"] = args ? args.owner : undefined; + resourceInputs["properties"] = args ? args.properties : undefined; + resourceInputs["readOnly"] = args ? args.readOnly : undefined; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + const secretOpts = { additionalSecretOutputs: ["options"] }; + opts = pulumi.mergeOptions(opts, secretOpts); + super(Connection.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * Input properties used for looking up and filtering Connection resources. + */ +export interface ConnectionState { + /** + * Free-form text. + */ + comment?: pulumi.Input; + /** + * Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + */ + connectionType?: pulumi.Input; + metastoreId?: pulumi.Input; + /** + * Name of the Connection. + */ + name?: pulumi.Input; + /** + * The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + */ + options?: pulumi.Input<{[key: string]: any}>; + /** + * Name of the connection owner. + */ + owner?: pulumi.Input; + /** + * Free-form connection properties. + */ + properties?: pulumi.Input<{[key: string]: any}>; + readOnly?: pulumi.Input; +} + +/** + * The set of arguments for constructing a Connection resource. + */ +export interface ConnectionArgs { + /** + * Free-form text. + */ + comment?: pulumi.Input; + /** + * Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + */ + connectionType: pulumi.Input; + metastoreId?: pulumi.Input; + /** + * Name of the Connection. + */ + name?: pulumi.Input; + /** + * The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + */ + options: pulumi.Input<{[key: string]: any}>; + /** + * Name of the connection owner. + */ + owner?: pulumi.Input; + /** + * Free-form connection properties. 
+ */ + properties?: pulumi.Input<{[key: string]: any}>; + readOnly?: pulumi.Input; +} diff --git a/sdk/nodejs/externalLocation.ts b/sdk/nodejs/externalLocation.ts index e519a4a8..5eb91115 100644 --- a/sdk/nodejs/externalLocation.ts +++ b/sdk/nodejs/externalLocation.ts @@ -2,6 +2,8 @@ // *** Do not edit by hand unless you're certain you know what you are doing! *** import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "./types/input"; +import * as outputs from "./types/output"; import * as utilities from "./utilities"; /** @@ -46,25 +48,37 @@ export class ExternalLocation extends pulumi.CustomResource { return obj['__pulumiType'] === ExternalLocation.__pulumiType; } + /** + * The ARN of the s3 access point to use with the external location (AWS). + */ + public readonly accessPoint!: pulumi.Output; /** * User-supplied free-form text. */ public readonly comment!: pulumi.Output; /** - * Name of the databricks.StorageCredential to use with this External Location. + * Name of the databricks.StorageCredential to use with this external location. */ public readonly credentialName!: pulumi.Output; + /** + * The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + */ + public readonly encryptionDetails!: pulumi.Output; /** * Destroy external location regardless of its dependents. */ public readonly forceDestroy!: pulumi.Output; + /** + * Update external location regardless of its dependents. + */ + public readonly forceUpdate!: pulumi.Output; public readonly metastoreId!: pulumi.Output; /** * Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. */ public readonly name!: pulumi.Output; /** - * Username/groupname/sp applicationId of the external Location owner. + * Username/groupname/sp applicationId of the external location owner. */ public readonly owner!: pulumi.Output; /** @@ -93,9 +107,12 @@ export class ExternalLocation extends pulumi.CustomResource { opts = opts || {}; if (opts.id) { const state = argsOrState as ExternalLocationState | undefined; + resourceInputs["accessPoint"] = state ? state.accessPoint : undefined; resourceInputs["comment"] = state ? state.comment : undefined; resourceInputs["credentialName"] = state ? state.credentialName : undefined; + resourceInputs["encryptionDetails"] = state ? state.encryptionDetails : undefined; resourceInputs["forceDestroy"] = state ? state.forceDestroy : undefined; + resourceInputs["forceUpdate"] = state ? state.forceUpdate : undefined; resourceInputs["metastoreId"] = state ? state.metastoreId : undefined; resourceInputs["name"] = state ? state.name : undefined; resourceInputs["owner"] = state ? state.owner : undefined; @@ -110,9 +127,12 @@ export class ExternalLocation extends pulumi.CustomResource { if ((!args || args.url === undefined) && !opts.urn) { throw new Error("Missing required property 'url'"); } + resourceInputs["accessPoint"] = args ? args.accessPoint : undefined; resourceInputs["comment"] = args ? args.comment : undefined; resourceInputs["credentialName"] = args ? args.credentialName : undefined; + resourceInputs["encryptionDetails"] = args ? args.encryptionDetails : undefined; resourceInputs["forceDestroy"] = args ? args.forceDestroy : undefined; + resourceInputs["forceUpdate"] = args ? args.forceUpdate : undefined; resourceInputs["metastoreId"] = args ? args.metastoreId : undefined; resourceInputs["name"] = args ? args.name : undefined; resourceInputs["owner"] = args ? 
args.owner : undefined; @@ -129,25 +149,37 @@ export class ExternalLocation extends pulumi.CustomResource { * Input properties used for looking up and filtering ExternalLocation resources. */ export interface ExternalLocationState { + /** + * The ARN of the s3 access point to use with the external location (AWS). + */ + accessPoint?: pulumi.Input; /** * User-supplied free-form text. */ comment?: pulumi.Input; /** - * Name of the databricks.StorageCredential to use with this External Location. + * Name of the databricks.StorageCredential to use with this external location. */ credentialName?: pulumi.Input; + /** + * The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + */ + encryptionDetails?: pulumi.Input; /** * Destroy external location regardless of its dependents. */ forceDestroy?: pulumi.Input; + /** + * Update external location regardless of its dependents. + */ + forceUpdate?: pulumi.Input; metastoreId?: pulumi.Input; /** * Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. */ name?: pulumi.Input; /** - * Username/groupname/sp applicationId of the external Location owner. + * Username/groupname/sp applicationId of the external location owner. */ owner?: pulumi.Input; /** @@ -168,25 +200,37 @@ export interface ExternalLocationState { * The set of arguments for constructing a ExternalLocation resource. */ export interface ExternalLocationArgs { + /** + * The ARN of the s3 access point to use with the external location (AWS). + */ + accessPoint?: pulumi.Input; /** * User-supplied free-form text. */ comment?: pulumi.Input; /** - * Name of the databricks.StorageCredential to use with this External Location. + * Name of the databricks.StorageCredential to use with this external location. */ credentialName: pulumi.Input; + /** + * The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + */ + encryptionDetails?: pulumi.Input; /** * Destroy external location regardless of its dependents. */ forceDestroy?: pulumi.Input; + /** + * Update external location regardless of its dependents. + */ + forceUpdate?: pulumi.Input; metastoreId?: pulumi.Input; /** * Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. */ name?: pulumi.Input; /** - * Username/groupname/sp applicationId of the external Location owner. + * Username/groupname/sp applicationId of the external location owner. */ owner?: pulumi.Input; /** diff --git a/sdk/nodejs/getCurrentUser.ts b/sdk/nodejs/getCurrentUser.ts index 451da290..79b40a70 100644 --- a/sdk/nodejs/getCurrentUser.ts +++ b/sdk/nodejs/getCurrentUser.ts @@ -16,6 +16,7 @@ import * as utilities from "./utilities"; * * `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`. * * `alphanumeric` - Alphanumeric representation of user local name. e.g. `mrFoo`. * * `workspaceUrl` - URL of the current Databricks workspace. + * * `aclPrincipalId` - identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal. 
* * ## Related Resources * @@ -37,6 +38,7 @@ export function getCurrentUser(opts?: pulumi.InvokeOptions): Promise; /** * True if group members can create clusters */ diff --git a/sdk/nodejs/getServicePrincipal.ts b/sdk/nodejs/getServicePrincipal.ts index 7d2e857d..b8a54ca5 100644 --- a/sdk/nodejs/getServicePrincipal.ts +++ b/sdk/nodejs/getServicePrincipal.ts @@ -46,6 +46,7 @@ export function getServicePrincipal(args?: GetServicePrincipalArgs, opts?: pulum opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {}); return pulumi.runtime.invoke("databricks:index/getServicePrincipal:getServicePrincipal", { + "aclPrincipalId": args.aclPrincipalId, "active": args.active, "applicationId": args.applicationId, "displayName": args.displayName, @@ -61,6 +62,10 @@ export function getServicePrincipal(args?: GetServicePrincipalArgs, opts?: pulum * A collection of arguments for invoking getServicePrincipal. */ export interface GetServicePrincipalArgs { + /** + * identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + */ + aclPrincipalId?: string; /** * Whether service principal is active or not. */ @@ -96,6 +101,10 @@ export interface GetServicePrincipalArgs { * A collection of values returned by getServicePrincipal. */ export interface GetServicePrincipalResult { + /** + * identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + */ + readonly aclPrincipalId: string; /** * Whether service principal is active or not. */ @@ -168,6 +177,10 @@ export function getServicePrincipalOutput(args?: GetServicePrincipalOutputArgs, * A collection of arguments for invoking getServicePrincipal. */ export interface GetServicePrincipalOutputArgs { + /** + * identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + */ + aclPrincipalId?: pulumi.Input; /** * Whether service principal is active or not. */ diff --git a/sdk/nodejs/getUser.ts b/sdk/nodejs/getUser.ts index a545eb59..6c87f949 100644 --- a/sdk/nodejs/getUser.ts +++ b/sdk/nodejs/getUser.ts @@ -70,6 +70,10 @@ export interface GetUserArgs { * A collection of values returned by getUser. */ export interface GetUserResult { + /** + * identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com`. + */ + readonly aclPrincipalId: string; /** * Alphanumeric representation of user local name. e.g. `mrFoo`. */ diff --git a/sdk/nodejs/grants.ts b/sdk/nodejs/grants.ts index 74066d64..6ee46111 100644 --- a/sdk/nodejs/grants.ts +++ b/sdk/nodejs/grants.ts @@ -36,6 +36,7 @@ export class Grants extends pulumi.CustomResource { public readonly catalog!: pulumi.Output; public readonly externalLocation!: pulumi.Output; + public readonly foreignConnection!: pulumi.Output; public readonly function!: pulumi.Output; public readonly grants!: pulumi.Output; public readonly materializedView!: pulumi.Output; @@ -62,6 +63,7 @@ export class Grants extends pulumi.CustomResource { const state = argsOrState as GrantsState | undefined; resourceInputs["catalog"] = state ? state.catalog : undefined; resourceInputs["externalLocation"] = state ? state.externalLocation : undefined; + resourceInputs["foreignConnection"] = state ? state.foreignConnection : undefined; resourceInputs["function"] = state ? state.function : undefined; resourceInputs["grants"] = state ? state.grants : undefined; resourceInputs["materializedView"] = state ? 
state.materializedView : undefined; @@ -79,6 +81,7 @@ export class Grants extends pulumi.CustomResource { } resourceInputs["catalog"] = args ? args.catalog : undefined; resourceInputs["externalLocation"] = args ? args.externalLocation : undefined; + resourceInputs["foreignConnection"] = args ? args.foreignConnection : undefined; resourceInputs["function"] = args ? args.function : undefined; resourceInputs["grants"] = args ? args.grants : undefined; resourceInputs["materializedView"] = args ? args.materializedView : undefined; @@ -101,6 +104,7 @@ export class Grants extends pulumi.CustomResource { export interface GrantsState { catalog?: pulumi.Input; externalLocation?: pulumi.Input; + foreignConnection?: pulumi.Input; function?: pulumi.Input; grants?: pulumi.Input[]>; materializedView?: pulumi.Input; @@ -119,6 +123,7 @@ export interface GrantsState { export interface GrantsArgs { catalog?: pulumi.Input; externalLocation?: pulumi.Input; + foreignConnection?: pulumi.Input; function?: pulumi.Input; grants: pulumi.Input[]>; materializedView?: pulumi.Input; diff --git a/sdk/nodejs/index.ts b/sdk/nodejs/index.ts index 7d15ba29..6f2043a6 100644 --- a/sdk/nodejs/index.ts +++ b/sdk/nodejs/index.ts @@ -30,6 +30,11 @@ export type ClusterPolicy = import("./clusterPolicy").ClusterPolicy; export const ClusterPolicy: typeof import("./clusterPolicy").ClusterPolicy = null as any; utilities.lazyLoad(exports, ["ClusterPolicy"], () => require("./clusterPolicy")); +export { ConnectionArgs, ConnectionState } from "./connection"; +export type Connection = import("./connection").Connection; +export const Connection: typeof import("./connection").Connection = null as any; +utilities.lazyLoad(exports, ["Connection"], () => require("./connection")); + export { DbfsFileArgs, DbfsFileState } from "./dbfsFile"; export type DbfsFile = import("./dbfsFile").DbfsFile; export const DbfsFile: typeof import("./dbfsFile").DbfsFile = null as any; @@ -567,6 +572,8 @@ const _module = { return new Cluster(name, undefined, { urn }) case "databricks:index/clusterPolicy:ClusterPolicy": return new ClusterPolicy(name, undefined, { urn }) + case "databricks:index/connection:Connection": + return new Connection(name, undefined, { urn }) case "databricks:index/dbfsFile:DbfsFile": return new DbfsFile(name, undefined, { urn }) case "databricks:index/directory:Directory": @@ -711,6 +718,7 @@ pulumi.runtime.registerResourceModule("databricks", "index/catalog", _module) pulumi.runtime.registerResourceModule("databricks", "index/catalogWorkspaceBinding", _module) pulumi.runtime.registerResourceModule("databricks", "index/cluster", _module) pulumi.runtime.registerResourceModule("databricks", "index/clusterPolicy", _module) +pulumi.runtime.registerResourceModule("databricks", "index/connection", _module) pulumi.runtime.registerResourceModule("databricks", "index/dbfsFile", _module) pulumi.runtime.registerResourceModule("databricks", "index/directory", _module) pulumi.runtime.registerResourceModule("databricks", "index/entitlements", _module) diff --git a/sdk/nodejs/metastore.ts b/sdk/nodejs/metastore.ts index 92254b0c..0d83a634 100644 --- a/sdk/nodejs/metastore.ts +++ b/sdk/nodejs/metastore.ts @@ -5,9 +5,6 @@ import * as pulumi from "@pulumi/pulumi"; import * as utilities from "./utilities"; /** - * > **Notes** - * Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. - * * A metastore is the top-level container of objects in Unity Catalog. 
It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. * * Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). @@ -69,6 +66,7 @@ export class Metastore extends pulumi.CustomResource { */ public readonly forceDestroy!: pulumi.Output; public readonly globalMetastoreId!: pulumi.Output; + public readonly metastoreId!: pulumi.Output; /** * Name of metastore. */ @@ -77,11 +75,15 @@ export class Metastore extends pulumi.CustomResource { * Username/groupname/sp applicationId of the metastore owner. */ public readonly owner!: pulumi.Output; + /** + * The region of the metastore + */ public readonly region!: pulumi.Output; /** * Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. */ public readonly storageRoot!: pulumi.Output; + public readonly storageRootCredentialId!: pulumi.Output; public readonly updatedAt!: pulumi.Output; public readonly updatedBy!: pulumi.Output; @@ -107,10 +109,12 @@ export class Metastore extends pulumi.CustomResource { resourceInputs["deltaSharingScope"] = state ? state.deltaSharingScope : undefined; resourceInputs["forceDestroy"] = state ? state.forceDestroy : undefined; resourceInputs["globalMetastoreId"] = state ? state.globalMetastoreId : undefined; + resourceInputs["metastoreId"] = state ? state.metastoreId : undefined; resourceInputs["name"] = state ? state.name : undefined; resourceInputs["owner"] = state ? state.owner : undefined; resourceInputs["region"] = state ? state.region : undefined; resourceInputs["storageRoot"] = state ? state.storageRoot : undefined; + resourceInputs["storageRootCredentialId"] = state ? state.storageRootCredentialId : undefined; resourceInputs["updatedAt"] = state ? state.updatedAt : undefined; resourceInputs["updatedBy"] = state ? state.updatedBy : undefined; } else { @@ -127,10 +131,12 @@ export class Metastore extends pulumi.CustomResource { resourceInputs["deltaSharingScope"] = args ? args.deltaSharingScope : undefined; resourceInputs["forceDestroy"] = args ? args.forceDestroy : undefined; resourceInputs["globalMetastoreId"] = args ? args.globalMetastoreId : undefined; + resourceInputs["metastoreId"] = args ? args.metastoreId : undefined; resourceInputs["name"] = args ? args.name : undefined; resourceInputs["owner"] = args ? args.owner : undefined; resourceInputs["region"] = args ? args.region : undefined; resourceInputs["storageRoot"] = args ? args.storageRoot : undefined; + resourceInputs["storageRootCredentialId"] = args ? args.storageRootCredentialId : undefined; resourceInputs["updatedAt"] = args ? args.updatedAt : undefined; resourceInputs["updatedBy"] = args ? args.updatedBy : undefined; } @@ -164,6 +170,7 @@ export interface MetastoreState { */ forceDestroy?: pulumi.Input; globalMetastoreId?: pulumi.Input; + metastoreId?: pulumi.Input; /** * Name of metastore. */ @@ -172,11 +179,15 @@ export interface MetastoreState { * Username/groupname/sp applicationId of the metastore owner. */ owner?: pulumi.Input; + /** + * The region of the metastore + */ region?: pulumi.Input; /** * Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. 
*/ storageRoot?: pulumi.Input; + storageRootCredentialId?: pulumi.Input; updatedAt?: pulumi.Input; updatedBy?: pulumi.Input; } @@ -206,6 +217,7 @@ export interface MetastoreArgs { */ forceDestroy?: pulumi.Input; globalMetastoreId?: pulumi.Input; + metastoreId?: pulumi.Input; /** * Name of metastore. */ @@ -214,11 +226,15 @@ export interface MetastoreArgs { * Username/groupname/sp applicationId of the metastore owner. */ owner?: pulumi.Input; + /** + * The region of the metastore + */ region?: pulumi.Input; /** * Path on cloud storage account, where managed `databricks.Table` are stored. Change forces creation of a new resource. */ storageRoot: pulumi.Input; + storageRootCredentialId?: pulumi.Input; updatedAt?: pulumi.Input; updatedBy?: pulumi.Input; } diff --git a/sdk/nodejs/metastoreAssignment.ts b/sdk/nodejs/metastoreAssignment.ts index 1026f0a6..2b2fa506 100644 --- a/sdk/nodejs/metastoreAssignment.ts +++ b/sdk/nodejs/metastoreAssignment.ts @@ -16,6 +16,7 @@ import * as utilities from "./utilities"; * const thisMetastore = new databricks.Metastore("thisMetastore", { * storageRoot: `s3://${aws_s3_bucket.metastore.id}/metastore`, * owner: "uc admins", + * region: "us-east-1", * forceDestroy: true, * }); * const thisMetastoreAssignment = new databricks.MetastoreAssignment("thisMetastoreAssignment", { diff --git a/sdk/nodejs/mlflowModel.ts b/sdk/nodejs/mlflowModel.ts index bdb04342..b34f4da0 100644 --- a/sdk/nodejs/mlflowModel.ts +++ b/sdk/nodejs/mlflowModel.ts @@ -81,22 +81,21 @@ export class MlflowModel extends pulumi.CustomResource { return obj['__pulumiType'] === MlflowModel.__pulumiType; } - public readonly creationTimestamp!: pulumi.Output; + public readonly creationTimestamp!: pulumi.Output; /** * The description of the MLflow model. */ public readonly description!: pulumi.Output; - public readonly lastUpdatedTimestamp!: pulumi.Output; + public readonly lastUpdatedTimestamp!: pulumi.Output; /** * Name of MLflow model. Change of name triggers new resource. */ public readonly name!: pulumi.Output; - public readonly registeredModelId!: pulumi.Output; /** * Tags for the MLflow model. */ public readonly tags!: pulumi.Output; - public readonly userId!: pulumi.Output; + public readonly userId!: pulumi.Output; /** * Create a MlflowModel resource with the given unique name, arguments, and options. @@ -115,7 +114,6 @@ export class MlflowModel extends pulumi.CustomResource { resourceInputs["description"] = state ? state.description : undefined; resourceInputs["lastUpdatedTimestamp"] = state ? state.lastUpdatedTimestamp : undefined; resourceInputs["name"] = state ? state.name : undefined; - resourceInputs["registeredModelId"] = state ? state.registeredModelId : undefined; resourceInputs["tags"] = state ? state.tags : undefined; resourceInputs["userId"] = state ? state.userId : undefined; } else { @@ -124,7 +122,6 @@ export class MlflowModel extends pulumi.CustomResource { resourceInputs["description"] = args ? args.description : undefined; resourceInputs["lastUpdatedTimestamp"] = args ? args.lastUpdatedTimestamp : undefined; resourceInputs["name"] = args ? args.name : undefined; - resourceInputs["registeredModelId"] = args ? args.registeredModelId : undefined; resourceInputs["tags"] = args ? args.tags : undefined; resourceInputs["userId"] = args ? args.userId : undefined; } @@ -147,7 +144,6 @@ export interface MlflowModelState { * Name of MLflow model. Change of name triggers new resource. */ name?: pulumi.Input; - registeredModelId?: pulumi.Input; /** * Tags for the MLflow model. 
*/ @@ -169,7 +165,6 @@ export interface MlflowModelArgs { * Name of MLflow model. Change of name triggers new resource. */ name?: pulumi.Input; - registeredModelId?: pulumi.Input; /** * Tags for the MLflow model. */ diff --git a/sdk/nodejs/mwsCustomerManagedKeys.ts b/sdk/nodejs/mwsCustomerManagedKeys.ts index 20bdff0d..d7304d57 100644 --- a/sdk/nodejs/mwsCustomerManagedKeys.ts +++ b/sdk/nodejs/mwsCustomerManagedKeys.ts @@ -10,9 +10,11 @@ import * as utilities from "./utilities"; * ## Example Usage * * > **Note** If you've used the resource before, please add `useCases = ["MANAGED_SERVICES"]` to keep the previous behaviour. + * * ### Customer-managed key for managed services * * You must configure this during workspace creation + * ### For AWS * * ```typescript * import * as pulumi from "@pulumi/pulumi"; @@ -61,7 +63,25 @@ import * as utilities from "./utilities"; * useCases: ["MANAGED_SERVICES"], * }); * ``` + * ### For GCP + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as databricks from "@pulumi/databricks"; + * + * const config = new pulumi.Config(); + * const databricksAccountId = config.requireObject("databricksAccountId"); + * const cmekResourceId = config.requireObject("cmekResourceId"); + * const managedServices = new databricks.MwsCustomerManagedKeys("managedServices", { + * accountId: databricksAccountId, + * gcpKeyInfo: { + * kmsKeyId: cmekResourceId, + * }, + * useCases: ["MANAGED_SERVICES"], + * }); + * ``` * ### Customer-managed key for workspace storage + * ### For AWS * * ```typescript * import * as pulumi from "@pulumi/pulumi"; @@ -152,6 +172,23 @@ import * as utilities from "./utilities"; * useCases: ["STORAGE"], * }); * ``` + * ### For GCP + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as databricks from "@pulumi/databricks"; + * + * const config = new pulumi.Config(); + * const databricksAccountId = config.requireObject("databricksAccountId"); + * const cmekResourceId = config.requireObject("cmekResourceId"); + * const storage = new databricks.MwsCustomerManagedKeys("storage", { + * accountId: databricksAccountId, + * gcpKeyInfo: { + * kmsKeyId: cmekResourceId, + * }, + * useCases: ["STORAGE"], + * }); + * ``` * ## Related Resources * * The following resources are used in the same context: @@ -200,7 +237,7 @@ export class MwsCustomerManagedKeys extends pulumi.CustomResource { */ public readonly accountId!: pulumi.Output; /** - * This field is a block and is documented below. + * This field is a block and is documented below. This conflicts with `gcpKeyInfo` */ public readonly awsKeyInfo!: pulumi.Output; /** @@ -211,6 +248,9 @@ export class MwsCustomerManagedKeys extends pulumi.CustomResource { * (String) ID of the encryption key configuration object. */ public readonly customerManagedKeyId!: pulumi.Output; + /** + * This field is a block and is documented below. This conflicts with `awsKeyInfo` + */ public readonly gcpKeyInfo!: pulumi.Output; /** * *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `useCases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: @@ -265,7 +305,7 @@ export interface MwsCustomerManagedKeysState { */ accountId?: pulumi.Input; /** - * This field is a block and is documented below. + * This field is a block and is documented below. 
This conflicts with `gcpKeyInfo` */ awsKeyInfo?: pulumi.Input; /** @@ -276,6 +316,9 @@ export interface MwsCustomerManagedKeysState { * (String) ID of the encryption key configuration object. */ customerManagedKeyId?: pulumi.Input; + /** + * This field is a block and is documented below. This conflicts with `awsKeyInfo` + */ gcpKeyInfo?: pulumi.Input; /** * *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `useCases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: @@ -292,7 +335,7 @@ export interface MwsCustomerManagedKeysArgs { */ accountId: pulumi.Input; /** - * This field is a block and is documented below. + * This field is a block and is documented below. This conflicts with `gcpKeyInfo` */ awsKeyInfo?: pulumi.Input; /** @@ -303,6 +346,9 @@ export interface MwsCustomerManagedKeysArgs { * (String) ID of the encryption key configuration object. */ customerManagedKeyId?: pulumi.Input; + /** + * This field is a block and is documented below. This conflicts with `awsKeyInfo` + */ gcpKeyInfo?: pulumi.Input; /** * *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `useCases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: diff --git a/sdk/nodejs/mwsWorkspaces.ts b/sdk/nodejs/mwsWorkspaces.ts index df81f96f..81c94ec0 100644 --- a/sdk/nodejs/mwsWorkspaces.ts +++ b/sdk/nodejs/mwsWorkspaces.ts @@ -93,6 +93,9 @@ export class MwsWorkspaces extends pulumi.CustomResource { * `storageConfigurationId` from storage configuration. */ public readonly storageConfigurationId!: pulumi.Output; + /** + * `customerManagedKeyId` from customer managed keys with `useCases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + */ public readonly storageCustomerManagedKeyId!: pulumi.Output; public readonly token!: pulumi.Output; /** @@ -253,6 +256,9 @@ export interface MwsWorkspacesState { * `storageConfigurationId` from storage configuration. */ storageConfigurationId?: pulumi.Input; + /** + * `customerManagedKeyId` from customer managed keys with `useCases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + */ storageCustomerManagedKeyId?: pulumi.Input; token?: pulumi.Input; /** @@ -335,6 +341,9 @@ export interface MwsWorkspacesArgs { * `storageConfigurationId` from storage configuration. */ storageConfigurationId?: pulumi.Input; + /** + * `customerManagedKeyId` from customer managed keys with `useCases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + */ storageCustomerManagedKeyId?: pulumi.Input; token?: pulumi.Input; /** diff --git a/sdk/nodejs/share.ts b/sdk/nodejs/share.ts index 7685b847..a3846edd 100644 --- a/sdk/nodejs/share.ts +++ b/sdk/nodejs/share.ts @@ -47,6 +47,10 @@ export class Share extends pulumi.CustomResource { */ public readonly name!: pulumi.Output; public readonly objects!: pulumi.Output; + /** + * User name/group name/sp applicationId of the share owner. + */ + public readonly owner!: pulumi.Output; /** * Create a Share resource with the given unique name, arguments, and options. @@ -65,12 +69,14 @@ export class Share extends pulumi.CustomResource { resourceInputs["createdBy"] = state ? state.createdBy : undefined; resourceInputs["name"] = state ? state.name : undefined; resourceInputs["objects"] = state ? state.objects : undefined; + resourceInputs["owner"] = state ? 
state.owner : undefined; } else { const args = argsOrState as ShareArgs | undefined; resourceInputs["createdAt"] = args ? args.createdAt : undefined; resourceInputs["createdBy"] = args ? args.createdBy : undefined; resourceInputs["name"] = args ? args.name : undefined; resourceInputs["objects"] = args ? args.objects : undefined; + resourceInputs["owner"] = args ? args.owner : undefined; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); super(Share.__pulumiType, name, resourceInputs, opts); @@ -94,6 +100,10 @@ export interface ShareState { */ name?: pulumi.Input; objects?: pulumi.Input[]>; + /** + * User name/group name/sp applicationId of the share owner. + */ + owner?: pulumi.Input; } /** @@ -113,4 +123,8 @@ export interface ShareArgs { */ name?: pulumi.Input; objects?: pulumi.Input[]>; + /** + * User name/group name/sp applicationId of the share owner. + */ + owner?: pulumi.Input; } diff --git a/sdk/nodejs/sqlAlert.ts b/sdk/nodejs/sqlAlert.ts index 6f28eb0e..aa9a7f76 100644 --- a/sdk/nodejs/sqlAlert.ts +++ b/sdk/nodejs/sqlAlert.ts @@ -48,6 +48,7 @@ export class SqlAlert extends pulumi.CustomResource { return obj['__pulumiType'] === SqlAlert.__pulumiType; } + public readonly createdAt!: pulumi.Output; /** * Name of the alert. */ @@ -68,6 +69,7 @@ export class SqlAlert extends pulumi.CustomResource { * Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. */ public readonly rearm!: pulumi.Output; + public readonly updatedAt!: pulumi.Output; /** * Create a SqlAlert resource with the given unique name, arguments, and options. @@ -82,11 +84,13 @@ export class SqlAlert extends pulumi.CustomResource { opts = opts || {}; if (opts.id) { const state = argsOrState as SqlAlertState | undefined; + resourceInputs["createdAt"] = state ? state.createdAt : undefined; resourceInputs["name"] = state ? state.name : undefined; resourceInputs["options"] = state ? state.options : undefined; resourceInputs["parent"] = state ? state.parent : undefined; resourceInputs["queryId"] = state ? state.queryId : undefined; resourceInputs["rearm"] = state ? state.rearm : undefined; + resourceInputs["updatedAt"] = state ? state.updatedAt : undefined; } else { const args = argsOrState as SqlAlertArgs | undefined; if ((!args || args.options === undefined) && !opts.urn) { @@ -95,11 +99,13 @@ export class SqlAlert extends pulumi.CustomResource { if ((!args || args.queryId === undefined) && !opts.urn) { throw new Error("Missing required property 'queryId'"); } + resourceInputs["createdAt"] = args ? args.createdAt : undefined; resourceInputs["name"] = args ? args.name : undefined; resourceInputs["options"] = args ? args.options : undefined; resourceInputs["parent"] = args ? args.parent : undefined; resourceInputs["queryId"] = args ? args.queryId : undefined; resourceInputs["rearm"] = args ? args.rearm : undefined; + resourceInputs["updatedAt"] = args ? args.updatedAt : undefined; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); super(SqlAlert.__pulumiType, name, resourceInputs, opts); @@ -110,6 +116,7 @@ export class SqlAlert extends pulumi.CustomResource { * Input properties used for looking up and filtering SqlAlert resources. */ export interface SqlAlertState { + createdAt?: pulumi.Input; /** * Name of the alert. */ @@ -130,12 +137,14 @@ export interface SqlAlertState { * Number of seconds after being triggered before the alert rearms itself and can be triggered again. 
If not defined, alert will never be triggered again. */ rearm?: pulumi.Input; + updatedAt?: pulumi.Input; } /** * The set of arguments for constructing a SqlAlert resource. */ export interface SqlAlertArgs { + createdAt?: pulumi.Input; /** * Name of the alert. */ @@ -156,4 +165,5 @@ export interface SqlAlertArgs { * Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. */ rearm?: pulumi.Input; + updatedAt?: pulumi.Input; } diff --git a/sdk/nodejs/sqlDashboard.ts b/sdk/nodejs/sqlDashboard.ts index 4af3197f..fa5baea6 100644 --- a/sdk/nodejs/sqlDashboard.ts +++ b/sdk/nodejs/sqlDashboard.ts @@ -86,9 +86,11 @@ export class SqlDashboard extends pulumi.CustomResource { return obj['__pulumiType'] === SqlDashboard.__pulumiType; } + public readonly createdAt!: pulumi.Output; public readonly name!: pulumi.Output; public readonly parent!: pulumi.Output; public readonly tags!: pulumi.Output; + public readonly updatedAt!: pulumi.Output; /** * Create a SqlDashboard resource with the given unique name, arguments, and options. @@ -103,14 +105,18 @@ export class SqlDashboard extends pulumi.CustomResource { opts = opts || {}; if (opts.id) { const state = argsOrState as SqlDashboardState | undefined; + resourceInputs["createdAt"] = state ? state.createdAt : undefined; resourceInputs["name"] = state ? state.name : undefined; resourceInputs["parent"] = state ? state.parent : undefined; resourceInputs["tags"] = state ? state.tags : undefined; + resourceInputs["updatedAt"] = state ? state.updatedAt : undefined; } else { const args = argsOrState as SqlDashboardArgs | undefined; + resourceInputs["createdAt"] = args ? args.createdAt : undefined; resourceInputs["name"] = args ? args.name : undefined; resourceInputs["parent"] = args ? args.parent : undefined; resourceInputs["tags"] = args ? args.tags : undefined; + resourceInputs["updatedAt"] = args ? args.updatedAt : undefined; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); super(SqlDashboard.__pulumiType, name, resourceInputs, opts); @@ -121,16 +127,20 @@ export class SqlDashboard extends pulumi.CustomResource { * Input properties used for looking up and filtering SqlDashboard resources. */ export interface SqlDashboardState { + createdAt?: pulumi.Input; name?: pulumi.Input; parent?: pulumi.Input; tags?: pulumi.Input[]>; + updatedAt?: pulumi.Input; } /** * The set of arguments for constructing a SqlDashboard resource. */ export interface SqlDashboardArgs { + createdAt?: pulumi.Input; name?: pulumi.Input; parent?: pulumi.Input; tags?: pulumi.Input[]>; + updatedAt?: pulumi.Input; } diff --git a/sdk/nodejs/sqlQuery.ts b/sdk/nodejs/sqlQuery.ts index 29baec24..55fc95ff 100644 --- a/sdk/nodejs/sqlQuery.ts +++ b/sdk/nodejs/sqlQuery.ts @@ -142,6 +142,7 @@ export class SqlQuery extends pulumi.CustomResource { return obj['__pulumiType'] === SqlQuery.__pulumiType; } + public readonly createdAt!: pulumi.Output; public readonly dataSourceId!: pulumi.Output; public readonly description!: pulumi.Output; public readonly name!: pulumi.Output; @@ -154,6 +155,7 @@ export class SqlQuery extends pulumi.CustomResource { */ public readonly schedule!: pulumi.Output; public readonly tags!: pulumi.Output; + public readonly updatedAt!: pulumi.Output; /** * Create a SqlQuery resource with the given unique name, arguments, and options. 
@@ -168,6 +170,7 @@ export class SqlQuery extends pulumi.CustomResource { opts = opts || {}; if (opts.id) { const state = argsOrState as SqlQueryState | undefined; + resourceInputs["createdAt"] = state ? state.createdAt : undefined; resourceInputs["dataSourceId"] = state ? state.dataSourceId : undefined; resourceInputs["description"] = state ? state.description : undefined; resourceInputs["name"] = state ? state.name : undefined; @@ -177,6 +180,7 @@ export class SqlQuery extends pulumi.CustomResource { resourceInputs["runAsRole"] = state ? state.runAsRole : undefined; resourceInputs["schedule"] = state ? state.schedule : undefined; resourceInputs["tags"] = state ? state.tags : undefined; + resourceInputs["updatedAt"] = state ? state.updatedAt : undefined; } else { const args = argsOrState as SqlQueryArgs | undefined; if ((!args || args.dataSourceId === undefined) && !opts.urn) { @@ -185,6 +189,7 @@ export class SqlQuery extends pulumi.CustomResource { if ((!args || args.query === undefined) && !opts.urn) { throw new Error("Missing required property 'query'"); } + resourceInputs["createdAt"] = args ? args.createdAt : undefined; resourceInputs["dataSourceId"] = args ? args.dataSourceId : undefined; resourceInputs["description"] = args ? args.description : undefined; resourceInputs["name"] = args ? args.name : undefined; @@ -194,6 +199,7 @@ export class SqlQuery extends pulumi.CustomResource { resourceInputs["runAsRole"] = args ? args.runAsRole : undefined; resourceInputs["schedule"] = args ? args.schedule : undefined; resourceInputs["tags"] = args ? args.tags : undefined; + resourceInputs["updatedAt"] = args ? args.updatedAt : undefined; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); super(SqlQuery.__pulumiType, name, resourceInputs, opts); @@ -204,6 +210,7 @@ export class SqlQuery extends pulumi.CustomResource { * Input properties used for looking up and filtering SqlQuery resources. */ export interface SqlQueryState { + createdAt?: pulumi.Input; dataSourceId?: pulumi.Input; description?: pulumi.Input; name?: pulumi.Input; @@ -216,12 +223,14 @@ export interface SqlQueryState { */ schedule?: pulumi.Input; tags?: pulumi.Input[]>; + updatedAt?: pulumi.Input; } /** * The set of arguments for constructing a SqlQuery resource. */ export interface SqlQueryArgs { + createdAt?: pulumi.Input; dataSourceId: pulumi.Input; description?: pulumi.Input; name?: pulumi.Input; @@ -234,4 +243,5 @@ export interface SqlQueryArgs { */ schedule?: pulumi.Input; tags?: pulumi.Input[]>; + updatedAt?: pulumi.Input; } diff --git a/sdk/nodejs/storageCredential.ts b/sdk/nodejs/storageCredential.ts index 2cadb03a..7066eaf2 100644 --- a/sdk/nodejs/storageCredential.ts +++ b/sdk/nodejs/storageCredential.ts @@ -113,6 +113,7 @@ export class StorageCredential extends pulumi.CustomResource { public readonly azureServicePrincipal!: pulumi.Output; public readonly comment!: pulumi.Output; public readonly databricksGcpServiceAccount!: pulumi.Output; + public readonly forceDestroy!: pulumi.Output; public readonly gcpServiceAccountKey!: pulumi.Output; public readonly metastoreId!: pulumi.Output; /** @@ -148,6 +149,7 @@ export class StorageCredential extends pulumi.CustomResource { resourceInputs["azureServicePrincipal"] = state ? state.azureServicePrincipal : undefined; resourceInputs["comment"] = state ? state.comment : undefined; resourceInputs["databricksGcpServiceAccount"] = state ? state.databricksGcpServiceAccount : undefined; + resourceInputs["forceDestroy"] = state ? 
state.forceDestroy : undefined; resourceInputs["gcpServiceAccountKey"] = state ? state.gcpServiceAccountKey : undefined; resourceInputs["metastoreId"] = state ? state.metastoreId : undefined; resourceInputs["name"] = state ? state.name : undefined; @@ -160,6 +162,7 @@ export class StorageCredential extends pulumi.CustomResource { resourceInputs["azureServicePrincipal"] = args ? args.azureServicePrincipal : undefined; resourceInputs["comment"] = args ? args.comment : undefined; resourceInputs["databricksGcpServiceAccount"] = args ? args.databricksGcpServiceAccount : undefined; + resourceInputs["forceDestroy"] = args ? args.forceDestroy : undefined; resourceInputs["gcpServiceAccountKey"] = args ? args.gcpServiceAccountKey : undefined; resourceInputs["metastoreId"] = args ? args.metastoreId : undefined; resourceInputs["name"] = args ? args.name : undefined; @@ -180,6 +183,7 @@ export interface StorageCredentialState { azureServicePrincipal?: pulumi.Input; comment?: pulumi.Input; databricksGcpServiceAccount?: pulumi.Input; + forceDestroy?: pulumi.Input; gcpServiceAccountKey?: pulumi.Input; metastoreId?: pulumi.Input; /** @@ -207,6 +211,7 @@ export interface StorageCredentialArgs { azureServicePrincipal?: pulumi.Input; comment?: pulumi.Input; databricksGcpServiceAccount?: pulumi.Input; + forceDestroy?: pulumi.Input; gcpServiceAccountKey?: pulumi.Input; metastoreId?: pulumi.Input; /** diff --git a/sdk/nodejs/tsconfig.json b/sdk/nodejs/tsconfig.json index d8a84899..349497cb 100644 --- a/sdk/nodejs/tsconfig.json +++ b/sdk/nodejs/tsconfig.json @@ -20,6 +20,7 @@ "clusterPolicy.ts", "config/index.ts", "config/vars.ts", + "connection.ts", "dbfsFile.ts", "directory.ts", "entitlements.ts", diff --git a/sdk/nodejs/types/input.ts b/sdk/nodejs/types/input.ts index 3e78d9d6..a307b8e4 100644 --- a/sdk/nodejs/types/input.ts +++ b/sdk/nodejs/types/input.ts @@ -14,9 +14,10 @@ export interface AccessControlRuleSetGrantRule { */ principals?: pulumi.Input[]>; /** - * Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + * Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). * * `roles/servicePrincipal.manager` - Manager of a service principal. * * `roles/servicePrincipal.user` - User of a service principal. + * * `roles/group.manager` - Manager of a group. 
*/ role: pulumi.Input; } @@ -168,6 +169,15 @@ export interface ClusterWorkloadTypeClients { notebooks?: pulumi.Input; } +export interface ExternalLocationEncryptionDetails { + sseEncryptionDetails?: pulumi.Input; +} + +export interface ExternalLocationEncryptionDetailsSseEncryptionDetails { + algorithm?: pulumi.Input; + awsKmsKeyArn?: pulumi.Input; +} + export interface GetClusterClusterInfo { autoscale?: inputs.GetClusterClusterInfoAutoscale; /** @@ -1769,12 +1779,12 @@ export interface GetJobJobSettingsSettingsRunAsArgs { } export interface GetJobJobSettingsSettingsRunJobTask { - jobId: string; + jobId: number; jobParameters?: {[key: string]: any}; } export interface GetJobJobSettingsSettingsRunJobTaskArgs { - jobId: pulumi.Input; + jobId: pulumi.Input; jobParameters?: pulumi.Input<{[key: string]: any}>; } @@ -2361,12 +2371,12 @@ export interface GetJobJobSettingsSettingsTaskPythonWheelTaskArgs { } export interface GetJobJobSettingsSettingsTaskRunJobTask { - jobId: string; + jobId: number; jobParameters?: {[key: string]: any}; } export interface GetJobJobSettingsSettingsTaskRunJobTaskArgs { - jobId: pulumi.Input; + jobId: pulumi.Input; jobParameters?: pulumi.Input<{[key: string]: any}>; } @@ -3530,7 +3540,7 @@ export interface JobRunJobTask { /** * (String) ID of the job */ - jobId: pulumi.Input; + jobId: pulumi.Input; /** * (Map) Job parameters for the task */ @@ -4042,7 +4052,7 @@ export interface JobTaskRunJobTask { /** * (String) ID of the job */ - jobId: pulumi.Input; + jobId: pulumi.Input; /** * (Map) Job parameters for the task */ @@ -4320,7 +4330,7 @@ export interface MetastoreDataAccessAwsIamRole { /** * The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` * - * `azureServicePrincipal` optional configuration block for credential details for Azure: + * `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure (Recommended): */ roleArn: pulumi.Input; } @@ -4341,8 +4351,6 @@ export interface MetastoreDataAccessAzureServicePrincipal { applicationId: pulumi.Input; /** * The client secret generated for the above app ID in AAD. **This field is redacted on output** - * - * `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure: */ clientSecret: pulumi.Input; /** @@ -4354,6 +4362,8 @@ export interface MetastoreDataAccessAzureServicePrincipal { export interface MetastoreDataAccessDatabricksGcpServiceAccount { /** * The email of the GCP service account created, to be granted access to relevant buckets. + * + * `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): */ email?: pulumi.Input; } @@ -4361,6 +4371,8 @@ export interface MetastoreDataAccessDatabricksGcpServiceAccount { export interface MetastoreDataAccessGcpServiceAccountKey { /** * The email of the GCP service account created, to be granted access to relevant buckets. 
+ * + * `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): */ email: pulumi.Input; privateKey: pulumi.Input; @@ -4368,8 +4380,8 @@ export interface MetastoreDataAccessGcpServiceAccountKey { } export interface MlflowModelTag { - key: pulumi.Input; - value: pulumi.Input; + key?: pulumi.Input; + value?: pulumi.Input; } export interface MlflowWebhookHttpUrlSpec { @@ -4418,7 +4430,13 @@ export interface ModelServingConfig { } export interface ModelServingConfigServedModel { + /** + * a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + */ environmentVars?: pulumi.Input<{[key: string]: any}>; + /** + * ARN of the instance profile that the served model will use to access AWS resources. + */ instanceProfileArn?: pulumi.Input; /** * The name of the model in Databricks Model Registry to be served. @@ -4516,6 +4534,9 @@ export interface MwsCustomerManagedKeysAwsKeyInfo { } export interface MwsCustomerManagedKeysGcpKeyInfo { + /** + * The GCP KMS key's resource name. + */ kmsKeyId: pulumi.Input; } @@ -4618,6 +4639,9 @@ export interface MwsWorkspacesGkeConfig { export interface MwsWorkspacesToken { comment?: pulumi.Input; + /** + * Token expiry lifetime. By default its 2592000 (30 days). + */ lifetimeSeconds?: pulumi.Input; tokenId?: pulumi.Input; tokenValue?: pulumi.Input; @@ -5078,9 +5102,9 @@ export interface SqlTableColumn { */ nullable?: pulumi.Input; /** - * Column type spec (with metadata) as SQL text + * Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. */ - type: pulumi.Input; + type?: pulumi.Input; } export interface SqlWidgetParameter { diff --git a/sdk/nodejs/types/output.ts b/sdk/nodejs/types/output.ts index 9dd6cb7b..e5e9fcce 100644 --- a/sdk/nodejs/types/output.ts +++ b/sdk/nodejs/types/output.ts @@ -14,9 +14,10 @@ export interface AccessControlRuleSetGrantRule { */ principals?: string[]; /** - * Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + * Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). * * `roles/servicePrincipal.manager` - Manager of a service principal. * * `roles/servicePrincipal.user` - User of a service principal. + * * `roles/group.manager` - Manager of a group. 
*/ role: string; } @@ -168,6 +169,15 @@ export interface ClusterWorkloadTypeClients { notebooks?: boolean; } +export interface ExternalLocationEncryptionDetails { + sseEncryptionDetails?: outputs.ExternalLocationEncryptionDetailsSseEncryptionDetails; +} + +export interface ExternalLocationEncryptionDetailsSseEncryptionDetails { + algorithm?: string; + awsKmsKeyArn?: string; +} + export interface GetClusterClusterInfo { autoscale?: outputs.GetClusterClusterInfoAutoscale; /** @@ -977,7 +987,7 @@ export interface GetJobJobSettingsSettingsRunAs { } export interface GetJobJobSettingsSettingsRunJobTask { - jobId: string; + jobId: number; jobParameters?: {[key: string]: any}; } @@ -1273,7 +1283,7 @@ export interface GetJobJobSettingsSettingsTaskPythonWheelTask { } export interface GetJobJobSettingsSettingsTaskRunJobTask { - jobId: string; + jobId: number; jobParameters?: {[key: string]: any}; } @@ -2240,7 +2250,7 @@ export interface JobRunJobTask { /** * (String) ID of the job */ - jobId: string; + jobId: number; /** * (Map) Job parameters for the task */ @@ -2752,7 +2762,7 @@ export interface JobTaskRunJobTask { /** * (String) ID of the job */ - jobId: string; + jobId: number; /** * (Map) Job parameters for the task */ @@ -3030,7 +3040,7 @@ export interface MetastoreDataAccessAwsIamRole { /** * The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` * - * `azureServicePrincipal` optional configuration block for credential details for Azure: + * `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure (Recommended): */ roleArn: string; } @@ -3051,8 +3061,6 @@ export interface MetastoreDataAccessAzureServicePrincipal { applicationId: string; /** * The client secret generated for the above app ID in AAD. **This field is redacted on output** - * - * `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure: */ clientSecret: string; /** @@ -3064,6 +3072,8 @@ export interface MetastoreDataAccessAzureServicePrincipal { export interface MetastoreDataAccessDatabricksGcpServiceAccount { /** * The email of the GCP service account created, to be granted access to relevant buckets. + * + * `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): */ email: string; } @@ -3071,6 +3081,8 @@ export interface MetastoreDataAccessDatabricksGcpServiceAccount { export interface MetastoreDataAccessGcpServiceAccountKey { /** * The email of the GCP service account created, to be granted access to relevant buckets. + * + * `azureServicePrincipal` optional configuration block for credential details for Azure (Legacy): */ email: string; privateKey: string; @@ -3078,8 +3090,8 @@ export interface MetastoreDataAccessGcpServiceAccountKey { } export interface MlflowModelTag { - key: string; - value: string; + key?: string; + value?: string; } export interface MlflowWebhookHttpUrlSpec { @@ -3128,7 +3140,13 @@ export interface ModelServingConfig { } export interface ModelServingConfigServedModel { + /** + * a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + */ environmentVars?: {[key: string]: any}; + /** + * ARN of the instance profile that the served model will use to access AWS resources. 
+ */ instanceProfileArn?: string; /** * The name of the model in Databricks Model Registry to be served. @@ -3226,6 +3244,9 @@ export interface MwsCustomerManagedKeysAwsKeyInfo { } export interface MwsCustomerManagedKeysGcpKeyInfo { + /** + * The GCP KMS key's resource name. + */ kmsKeyId: string; } @@ -3328,6 +3349,9 @@ export interface MwsWorkspacesGkeConfig { export interface MwsWorkspacesToken { comment?: string; + /** + * Token expiry lifetime. By default its 2592000 (30 days). + */ lifetimeSeconds?: number; tokenId: string; tokenValue: string; @@ -3788,9 +3812,9 @@ export interface SqlTableColumn { */ nullable?: boolean; /** - * Column type spec (with metadata) as SQL text + * Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. */ - type: string; + type?: string; } export interface SqlWidgetParameter { diff --git a/sdk/python/pulumi_databricks/__init__.py b/sdk/python/pulumi_databricks/__init__.py index b4b1041b..3087730a 100644 --- a/sdk/python/pulumi_databricks/__init__.py +++ b/sdk/python/pulumi_databricks/__init__.py @@ -10,6 +10,7 @@ from .catalog_workspace_binding import * from .cluster import * from .cluster_policy import * +from .connection import * from .dbfs_file import * from .directory import * from .entitlements import * @@ -166,6 +167,14 @@ "databricks:index/clusterPolicy:ClusterPolicy": "ClusterPolicy" } }, + { + "pkg": "databricks", + "mod": "index/connection", + "fqn": "pulumi_databricks", + "classes": { + "databricks:index/connection:Connection": "Connection" + } + }, { "pkg": "databricks", "mod": "index/dbfsFile", diff --git a/sdk/python/pulumi_databricks/_inputs.py b/sdk/python/pulumi_databricks/_inputs.py index fc14b4db..92f6e966 100644 --- a/sdk/python/pulumi_databricks/_inputs.py +++ b/sdk/python/pulumi_databricks/_inputs.py @@ -35,6 +35,8 @@ 'ClusterLibraryPypiArgs', 'ClusterWorkloadTypeArgs', 'ClusterWorkloadTypeClientsArgs', + 'ExternalLocationEncryptionDetailsArgs', + 'ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs', 'GrantsGrantArgs', 'InstancePoolAwsAttributesArgs', 'InstancePoolAzureAttributesArgs', @@ -441,9 +443,10 @@ def __init__(__self__, *, role: pulumi.Input[str], principals: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ - :param pulumi.Input[str] role: Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + :param pulumi.Input[str] role: Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). * `roles/servicePrincipal.manager` - Manager of a service principal. * `roles/servicePrincipal.user` - User of a service principal. + * `roles/group.manager` - Manager of a group. :param pulumi.Input[Sequence[pulumi.Input[str]]] principals: a list of principals who are granted a role. The following format is supported: * `users/{username}` (also exposed as `acl_principal_id` attribute of `User` resource). * `groups/{groupname}` (also exposed as `acl_principal_id` attribute of `Group` resource). 
@@ -457,9 +460,10 @@ def __init__(__self__, *, @pulumi.getter def role(self) -> pulumi.Input[str]: """ - Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). * `roles/servicePrincipal.manager` - Manager of a service principal. * `roles/servicePrincipal.user` - User of a service principal. + * `roles/group.manager` - Manager of a group. """ return pulumi.get(self, "role") @@ -1459,6 +1463,52 @@ def notebooks(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "notebooks", value) +@pulumi.input_type +class ExternalLocationEncryptionDetailsArgs: + def __init__(__self__, *, + sse_encryption_details: Optional[pulumi.Input['ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs']] = None): + if sse_encryption_details is not None: + pulumi.set(__self__, "sse_encryption_details", sse_encryption_details) + + @property + @pulumi.getter(name="sseEncryptionDetails") + def sse_encryption_details(self) -> Optional[pulumi.Input['ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs']]: + return pulumi.get(self, "sse_encryption_details") + + @sse_encryption_details.setter + def sse_encryption_details(self, value: Optional[pulumi.Input['ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs']]): + pulumi.set(self, "sse_encryption_details", value) + + +@pulumi.input_type +class ExternalLocationEncryptionDetailsSseEncryptionDetailsArgs: + def __init__(__self__, *, + algorithm: Optional[pulumi.Input[str]] = None, + aws_kms_key_arn: Optional[pulumi.Input[str]] = None): + if algorithm is not None: + pulumi.set(__self__, "algorithm", algorithm) + if aws_kms_key_arn is not None: + pulumi.set(__self__, "aws_kms_key_arn", aws_kms_key_arn) + + @property + @pulumi.getter + def algorithm(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "algorithm") + + @algorithm.setter + def algorithm(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "algorithm", value) + + @property + @pulumi.getter(name="awsKmsKeyArn") + def aws_kms_key_arn(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "aws_kms_key_arn") + + @aws_kms_key_arn.setter + def aws_kms_key_arn(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "aws_kms_key_arn", value) + + @pulumi.input_type class GrantsGrantArgs: def __init__(__self__, *, @@ -5414,10 +5464,10 @@ def user_name(self, value: Optional[pulumi.Input[str]]): @pulumi.input_type class JobRunJobTaskArgs: def __init__(__self__, *, - job_id: pulumi.Input[str], + job_id: pulumi.Input[int], job_parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None): """ - :param pulumi.Input[str] job_id: (String) ID of the job + :param pulumi.Input[int] job_id: (String) ID of the job :param pulumi.Input[Mapping[str, Any]] job_parameters: (Map) Job parameters for the task """ pulumi.set(__self__, "job_id", job_id) @@ -5426,14 +5476,14 @@ def __init__(__self__, *, @property @pulumi.getter(name="jobId") - def job_id(self) -> pulumi.Input[str]: + def 
job_id(self) -> pulumi.Input[int]: """ (String) ID of the job """ return pulumi.get(self, "job_id") @job_id.setter - def job_id(self, value: pulumi.Input[str]): + def job_id(self, value: pulumi.Input[int]): pulumi.set(self, "job_id", value) @property @@ -8027,10 +8077,10 @@ def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]) @pulumi.input_type class JobTaskRunJobTaskArgs: def __init__(__self__, *, - job_id: pulumi.Input[str], + job_id: pulumi.Input[int], job_parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None): """ - :param pulumi.Input[str] job_id: (String) ID of the job + :param pulumi.Input[int] job_id: (String) ID of the job :param pulumi.Input[Mapping[str, Any]] job_parameters: (Map) Job parameters for the task """ pulumi.set(__self__, "job_id", job_id) @@ -8039,14 +8089,14 @@ def __init__(__self__, *, @property @pulumi.getter(name="jobId") - def job_id(self) -> pulumi.Input[str]: + def job_id(self) -> pulumi.Input[int]: """ (String) ID of the job """ return pulumi.get(self, "job_id") @job_id.setter - def job_id(self, value: pulumi.Input[str]): + def job_id(self, value: pulumi.Input[int]): pulumi.set(self, "job_id", value) @property @@ -8990,7 +9040,7 @@ def __init__(__self__, *, """ :param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` - `azure_service_principal` optional configuration block for credential details for Azure: + `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): """ pulumi.set(__self__, "role_arn", role_arn) @@ -9000,7 +9050,7 @@ def role_arn(self) -> pulumi.Input[str]: """ The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` - `azure_service_principal` optional configuration block for credential details for Azure: + `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): """ return pulumi.get(self, "role_arn") @@ -9044,8 +9094,6 @@ def __init__(__self__, *, """ :param pulumi.Input[str] application_id: The application ID of the application registration within the referenced AAD tenant :param pulumi.Input[str] client_secret: The client secret generated for the above app ID in AAD. **This field is redacted on output** - - `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: :param pulumi.Input[str] directory_id: The directory ID corresponding to the Azure Active Directory (AAD) tenant of the application """ pulumi.set(__self__, "application_id", application_id) @@ -9069,8 +9117,6 @@ def application_id(self, value: pulumi.Input[str]): def client_secret(self) -> pulumi.Input[str]: """ The client secret generated for the above app ID in AAD. **This field is redacted on output** - - `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: """ return pulumi.get(self, "client_secret") @@ -9097,6 +9143,8 @@ def __init__(__self__, *, email: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] email: The email of the GCP service account created, to be granted access to relevant buckets. 
+ + `azure_service_principal` optional configuration block for credential details for Azure (Legacy): """ if email is not None: pulumi.set(__self__, "email", email) @@ -9106,6 +9154,8 @@ def __init__(__self__, *, def email(self) -> Optional[pulumi.Input[str]]: """ The email of the GCP service account created, to be granted access to relevant buckets. + + `azure_service_principal` optional configuration block for credential details for Azure (Legacy): """ return pulumi.get(self, "email") @@ -9122,6 +9172,8 @@ def __init__(__self__, *, private_key_id: pulumi.Input[str]): """ :param pulumi.Input[str] email: The email of the GCP service account created, to be granted access to relevant buckets. + + `azure_service_principal` optional configuration block for credential details for Azure (Legacy): """ pulumi.set(__self__, "email", email) pulumi.set(__self__, "private_key", private_key) @@ -9132,6 +9184,8 @@ def __init__(__self__, *, def email(self) -> pulumi.Input[str]: """ The email of the GCP service account created, to be granted access to relevant buckets. + + `azure_service_principal` optional configuration block for credential details for Azure (Legacy): """ return pulumi.get(self, "email") @@ -9161,27 +9215,29 @@ def private_key_id(self, value: pulumi.Input[str]): @pulumi.input_type class MlflowModelTagArgs: def __init__(__self__, *, - key: pulumi.Input[str], - value: pulumi.Input[str]): - pulumi.set(__self__, "key", key) - pulumi.set(__self__, "value", value) + key: Optional[pulumi.Input[str]] = None, + value: Optional[pulumi.Input[str]] = None): + if key is not None: + pulumi.set(__self__, "key", key) + if value is not None: + pulumi.set(__self__, "value", value) @property @pulumi.getter - def key(self) -> pulumi.Input[str]: + def key(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "key") @key.setter - def key(self, value: pulumi.Input[str]): + def key(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key", value) @property @pulumi.getter - def value(self) -> pulumi.Input[str]: + def value(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "value") @value.setter - def value(self, value: pulumi.Input[str]): + def value(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "value", value) @@ -9360,6 +9416,8 @@ def __init__(__self__, *, :param pulumi.Input[str] model_name: The name of the model in Databricks Model Registry to be served. :param pulumi.Input[str] model_version: The version of the model in Databricks Model Registry to be served. :param pulumi.Input[str] workload_size: The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + :param pulumi.Input[Mapping[str, Any]] environment_vars: a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + :param pulumi.Input[str] instance_profile_arn: ARN of the instance profile that the served model will use to access AWS resources. :param pulumi.Input[str] name: The name of a served model. It must be unique across an endpoint. If not specified, this field will default to `modelname-modelversion`. 
A served model name can consist of alphanumeric characters, dashes, and underscores. :param pulumi.Input[bool] scale_to_zero_enabled: Whether the compute resources for the served model should scale down to zero. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0. The default value is `true`. """ @@ -9414,6 +9472,9 @@ def workload_size(self, value: pulumi.Input[str]): @property @pulumi.getter(name="environmentVars") def environment_vars(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: + """ + a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + """ return pulumi.get(self, "environment_vars") @environment_vars.setter @@ -9423,6 +9484,9 @@ def environment_vars(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): @property @pulumi.getter(name="instanceProfileArn") def instance_profile_arn(self) -> Optional[pulumi.Input[str]]: + """ + ARN of the instance profile that the served model will use to access AWS resources. + """ return pulumi.get(self, "instance_profile_arn") @instance_profile_arn.setter @@ -9884,11 +9948,17 @@ def key_region(self, value: Optional[pulumi.Input[str]]): class MwsCustomerManagedKeysGcpKeyInfoArgs: def __init__(__self__, *, kms_key_id: pulumi.Input[str]): + """ + :param pulumi.Input[str] kms_key_id: The GCP KMS key's resource name. + """ pulumi.set(__self__, "kms_key_id", kms_key_id) @property @pulumi.getter(name="kmsKeyId") def kms_key_id(self) -> pulumi.Input[str]: + """ + The GCP KMS key's resource name. + """ return pulumi.get(self, "kms_key_id") @kms_key_id.setter @@ -10297,6 +10367,9 @@ def __init__(__self__, *, lifetime_seconds: Optional[pulumi.Input[int]] = None, token_id: Optional[pulumi.Input[str]] = None, token_value: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[int] lifetime_seconds: Token expiry lifetime. By default its 2592000 (30 days). + """ if comment is not None: pulumi.set(__self__, "comment", comment) if lifetime_seconds is not None: @@ -10318,6 +10391,9 @@ def comment(self, value: Optional[pulumi.Input[str]]): @property @pulumi.getter(name="lifetimeSeconds") def lifetime_seconds(self) -> Optional[pulumi.Input[int]]: + """ + Token expiry lifetime. By default its 2592000 (30 days). + """ return pulumi.get(self, "lifetime_seconds") @lifetime_seconds.setter @@ -12898,21 +12974,22 @@ def until_date(self, value: Optional[pulumi.Input[str]]): class SqlTableColumnArgs: def __init__(__self__, *, name: pulumi.Input[str], - type: pulumi.Input[str], comment: Optional[pulumi.Input[str]] = None, - nullable: Optional[pulumi.Input[bool]] = None): + nullable: Optional[pulumi.Input[bool]] = None, + type: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] name: User-visible name of column - :param pulumi.Input[str] type: Column type spec (with metadata) as SQL text :param pulumi.Input[str] comment: User-supplied free-form text. :param pulumi.Input[bool] nullable: Whether field is nullable (Default: `true`) + :param pulumi.Input[str] type: Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. 
""" pulumi.set(__self__, "name", name) - pulumi.set(__self__, "type", type) if comment is not None: pulumi.set(__self__, "comment", comment) if nullable is not None: pulumi.set(__self__, "nullable", nullable) + if type is not None: + pulumi.set(__self__, "type", type) @property @pulumi.getter @@ -12926,18 +13003,6 @@ def name(self) -> pulumi.Input[str]: def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) - @property - @pulumi.getter - def type(self) -> pulumi.Input[str]: - """ - Column type spec (with metadata) as SQL text - """ - return pulumi.get(self, "type") - - @type.setter - def type(self, value: pulumi.Input[str]): - pulumi.set(self, "type", value) - @property @pulumi.getter def comment(self) -> Optional[pulumi.Input[str]]: @@ -12962,6 +13027,18 @@ def nullable(self) -> Optional[pulumi.Input[bool]]: def nullable(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "nullable", value) + @property + @pulumi.getter + def type(self) -> Optional[pulumi.Input[str]]: + """ + Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. + """ + return pulumi.get(self, "type") + + @type.setter + def type(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "type", value) + @pulumi.input_type class SqlWidgetParameterArgs: @@ -19175,7 +19252,7 @@ def user_name(self, value: Optional[str]): @pulumi.input_type class GetJobJobSettingsSettingsRunJobTaskArgs: def __init__(__self__, *, - job_id: str, + job_id: int, job_parameters: Optional[Mapping[str, Any]] = None): pulumi.set(__self__, "job_id", job_id) if job_parameters is not None: @@ -19183,11 +19260,11 @@ def __init__(__self__, *, @property @pulumi.getter(name="jobId") - def job_id(self) -> str: + def job_id(self) -> int: return pulumi.get(self, "job_id") @job_id.setter - def job_id(self, value: str): + def job_id(self, value: int): pulumi.set(self, "job_id", value) @property @@ -21424,7 +21501,7 @@ def parameters(self, value: Optional[Sequence[str]]): @pulumi.input_type class GetJobJobSettingsSettingsTaskRunJobTaskArgs: def __init__(__self__, *, - job_id: str, + job_id: int, job_parameters: Optional[Mapping[str, Any]] = None): pulumi.set(__self__, "job_id", job_id) if job_parameters is not None: @@ -21432,11 +21509,11 @@ def __init__(__self__, *, @property @pulumi.getter(name="jobId") - def job_id(self) -> str: + def job_id(self) -> int: return pulumi.get(self, "job_id") @job_id.setter - def job_id(self, value: str): + def job_id(self, value: int): pulumi.set(self, "job_id", value) @property diff --git a/sdk/python/pulumi_databricks/access_control_rule_set.py b/sdk/python/pulumi_databricks/access_control_rule_set.py index 3418ad87..90871828 100644 --- a/sdk/python/pulumi_databricks/access_control_rule_set.py +++ b/sdk/python/pulumi_databricks/access_control_rule_set.py @@ -25,6 +25,8 @@ def __init__(__self__, *, !> **Warning** Name uniquely identifies a rule set resource. Ensure all the grant_rules blocks for a rule set name are present in one `AccessControlRuleSet` resource block. Otherwise, after applying changes, users might lose their role assignment even if that was not intended. :param pulumi.Input[str] name: Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. 
The following rule set formats are supported: * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * `accounts/{account_id}/ruleSets/default` """ if grant_rules is not None: pulumi.set(__self__, "grant_rules", grant_rules) @@ -51,6 +53,8 @@ def name(self) -> Optional[pulumi.Input[str]]: """ Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * `accounts/{account_id}/ruleSets/default` """ return pulumi.get(self, "name") @@ -72,6 +76,8 @@ def __init__(__self__, *, !> **Warning** Name uniquely identifies a rule set resource. Ensure all the grant_rules blocks for a rule set name are present in one `AccessControlRuleSet` resource block. Otherwise, after applying changes, users might lose their role assignment even if that was not intended. :param pulumi.Input[str] name: Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * `accounts/{account_id}/ruleSets/default` """ if etag is not None: pulumi.set(__self__, "etag", etag) @@ -109,6 +115,8 @@ def name(self) -> Optional[pulumi.Input[str]]: """ Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * `accounts/{account_id}/ruleSets/default` """ return pulumi.get(self, "name") @@ -128,10 +136,117 @@ def __init__(__self__, """ This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. - > **Note** Currently, we only support managing access rules on service principal resources through `AccessControlRuleSet`. + > **Note** Currently, we only support managing access rules on service principal, group and account resources through `AccessControlRuleSet`. > **Warning** `AccessControlRuleSet` cannot be used to manage access rules for resources supported by databricks_permissions. Refer to its documentation for more information. 
+ ## Service principal rule set usage + + Through a Databricks workspace: + + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + ds = databricks.get_group(display_name="Data Science") + automation_sp = databricks.ServicePrincipal("automationSp", display_name="SP_FOR_AUTOMATION") + automation_sp_rule_set = databricks.AccessControlRuleSet("automationSpRuleSet", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs( + principals=[ds.acl_principal_id], + role="roles/servicePrincipal.user", + )]) + ``` + + Through AWS Databricks account: + + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + # account level group creation + ds = databricks.Group("ds") + automation_sp = databricks.ServicePrincipal("automationSp", display_name="SP_FOR_AUTOMATION") + automation_sp_rule_set = databricks.AccessControlRuleSet("automationSpRuleSet", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs( + principals=[ds.acl_principal_id], + role="roles/servicePrincipal.user", + )]) + ``` + + Through Azure Databricks account: + + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + # account level group creation + ds = databricks.Group("ds") + automation_sp = databricks.ServicePrincipal("automationSp", + application_id="00000000-0000-0000-0000-000000000000", + display_name="SP_FOR_AUTOMATION") + automation_sp_rule_set = databricks.AccessControlRuleSet("automationSpRuleSet", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs( + principals=[ds.acl_principal_id], + role="roles/servicePrincipal.user", + )]) + ``` + + Through GCP Databricks account: + + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + # account level group creation + ds = databricks.Group("ds") + automation_sp = databricks.ServicePrincipal("automationSp", display_name="SP_FOR_AUTOMATION") + automation_sp_rule_set = databricks.AccessControlRuleSet("automationSpRuleSet", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs( + principals=[ds.acl_principal_id], + role="roles/servicePrincipal.user", + )]) + ``` + + ## Group rule set usage + + Refer to the appropriate provider configuration as shown in the examples for service principal rule set. + + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + ds = databricks.get_group(display_name="Data Science") + john = databricks.get_user(user_name="john.doe@example.com") + ds_group_rule_set = databricks.AccessControlRuleSet("dsGroupRuleSet", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs( + principals=[john.acl_principal_id], + role="roles/group.manager", + )]) + ``` + + ## Account rule set usage + + Refer to the appropriate provider configuration as shown in the examples for service principal rule set. 
+ + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + ds = databricks.get_group(display_name="Data Science") + john = databricks.get_user(user_name="john.doe@example.com") + account_rule_set = databricks.AccessControlRuleSet("accountRuleSet", grant_rules=[ + databricks.AccessControlRuleSetGrantRuleArgs( + principals=[john.acl_principal_id], + role="roles/group.manager", + ), + databricks.AccessControlRuleSetGrantRuleArgs( + principals=[data["databricks_user"]["ds"]["acl_principal_id"]], + role="roles/servicePrincipal.manager", + ), + ]) + ``` + ## Related Resources The following resources are often used in the same context: @@ -147,6 +262,8 @@ def __init__(__self__, !> **Warning** Name uniquely identifies a rule set resource. Ensure all the grant_rules blocks for a rule set name are present in one `AccessControlRuleSet` resource block. Otherwise, after applying changes, users might lose their role assignment even if that was not intended. :param pulumi.Input[str] name: Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * `accounts/{account_id}/ruleSets/default` """ ... @overload @@ -157,10 +274,117 @@ def __init__(__self__, """ This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. - > **Note** Currently, we only support managing access rules on service principal resources through `AccessControlRuleSet`. + > **Note** Currently, we only support managing access rules on service principal, group and account resources through `AccessControlRuleSet`. > **Warning** `AccessControlRuleSet` cannot be used to manage access rules for resources supported by databricks_permissions. Refer to its documentation for more information. 
+ ## Service principal rule set usage + + Through a Databricks workspace: + + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + ds = databricks.get_group(display_name="Data Science") + automation_sp = databricks.ServicePrincipal("automationSp", display_name="SP_FOR_AUTOMATION") + automation_sp_rule_set = databricks.AccessControlRuleSet("automationSpRuleSet", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs( + principals=[ds.acl_principal_id], + role="roles/servicePrincipal.user", + )]) + ``` + + Through AWS Databricks account: + + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + # account level group creation + ds = databricks.Group("ds") + automation_sp = databricks.ServicePrincipal("automationSp", display_name="SP_FOR_AUTOMATION") + automation_sp_rule_set = databricks.AccessControlRuleSet("automationSpRuleSet", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs( + principals=[ds.acl_principal_id], + role="roles/servicePrincipal.user", + )]) + ``` + + Through Azure Databricks account: + + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + # account level group creation + ds = databricks.Group("ds") + automation_sp = databricks.ServicePrincipal("automationSp", + application_id="00000000-0000-0000-0000-000000000000", + display_name="SP_FOR_AUTOMATION") + automation_sp_rule_set = databricks.AccessControlRuleSet("automationSpRuleSet", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs( + principals=[ds.acl_principal_id], + role="roles/servicePrincipal.user", + )]) + ``` + + Through GCP Databricks account: + + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + # account level group creation + ds = databricks.Group("ds") + automation_sp = databricks.ServicePrincipal("automationSp", display_name="SP_FOR_AUTOMATION") + automation_sp_rule_set = databricks.AccessControlRuleSet("automationSpRuleSet", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs( + principals=[ds.acl_principal_id], + role="roles/servicePrincipal.user", + )]) + ``` + + ## Group rule set usage + + Refer to the appropriate provider configuration as shown in the examples for service principal rule set. + + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + ds = databricks.get_group(display_name="Data Science") + john = databricks.get_user(user_name="john.doe@example.com") + ds_group_rule_set = databricks.AccessControlRuleSet("dsGroupRuleSet", grant_rules=[databricks.AccessControlRuleSetGrantRuleArgs( + principals=[john.acl_principal_id], + role="roles/group.manager", + )]) + ``` + + ## Account rule set usage + + Refer to the appropriate provider configuration as shown in the examples for service principal rule set. 
+ + ```python + import pulumi + import pulumi_databricks as databricks + + account_id = "00000000-0000-0000-0000-000000000000" + ds = databricks.get_group(display_name="Data Science") + john = databricks.get_user(user_name="john.doe@example.com") + account_rule_set = databricks.AccessControlRuleSet("accountRuleSet", grant_rules=[ + databricks.AccessControlRuleSetGrantRuleArgs( + principals=[john.acl_principal_id], + role="roles/group.manager", + ), + databricks.AccessControlRuleSetGrantRuleArgs( + principals=[data["databricks_user"]["ds"]["acl_principal_id"]], + role="roles/servicePrincipal.manager", + ), + ]) + ``` + ## Related Resources The following resources are often used in the same context: @@ -223,6 +447,8 @@ def get(resource_name: str, !> **Warning** Name uniquely identifies a rule set resource. Ensure all the grant_rules blocks for a rule set name are present in one `AccessControlRuleSet` resource block. Otherwise, after applying changes, users might lose their role assignment even if that was not intended. :param pulumi.Input[str] name: Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * `accounts/{account_id}/ruleSets/default` """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -254,6 +480,8 @@ def name(self) -> pulumi.Output[str]: """ Unique identifier of a rule set. The name determines the resource to which the rule set applies. Currently, only default rule sets are supported. The following rule set formats are supported: * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` + * `accounts/{account_id}/groups/{group_id}/ruleSets/default` + * `accounts/{account_id}/ruleSets/default` """ return pulumi.get(self, "name") diff --git a/sdk/python/pulumi_databricks/catalog.py b/sdk/python/pulumi_databricks/catalog.py index b890d1ed..b7aeb8af 100644 --- a/sdk/python/pulumi_databricks/catalog.py +++ b/sdk/python/pulumi_databricks/catalog.py @@ -15,6 +15,7 @@ class CatalogArgs: def __init__(__self__, *, comment: Optional[pulumi.Input[str]] = None, + connection_name: Optional[pulumi.Input[str]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, isolation_mode: Optional[pulumi.Input[str]] = None, metastore_id: Optional[pulumi.Input[str]] = None, @@ -27,6 +28,7 @@ def __init__(__self__, *, """ The set of arguments for constructing a Catalog resource. :param pulumi.Input[str] comment: User-supplied free-form text. + :param pulumi.Input[str] connection_name: For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. :param pulumi.Input[bool] force_destroy: Delete catalog regardless of its contents. :param pulumi.Input[str] isolation_mode: Whether the catalog is accessible from all workspaces or a specific set of workspaces. Can be `ISOLATED` or `OPEN`. Setting the catalog to `ISOLATED` will automatically allow access from the current workspace. :param pulumi.Input[str] name: Name of Catalog relative to parent metastore. 
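
As a rough illustration of how the new `Connection` resource and the `connection_name` argument on `Catalog` fit together, here is a minimal sketch of a PostgreSQL connection backing a foreign catalog. The host, credentials, and resource names are placeholders, and depending on the connection type additional connection or catalog options may be required; treat this as an assumption-laden sketch rather than a verified configuration.

```python
import pulumi
import pulumi_databricks as databricks

# Hypothetical connection to an external PostgreSQL database.
# `host`, `port`, `user` and `password` are the option keys documented for the
# `options` map; the concrete values below are placeholders.
pg = databricks.Connection("pg",
    name="postgres_conn",
    connection_type="POSTGRESQL",
    comment="Connection used by the foreign catalog below",
    options={
        "host": "example-host.database.example.com",
        "port": "5432",
        "user": "pg_user",
        "password": "pg_password",
    })

# Foreign catalog exposing the external data source through Unity Catalog.
# `connection_name` refers to the connection created above; changing it forces
# recreation of the catalog.
foreign_catalog = databricks.Catalog("foreignCatalog",
    name="postgres_catalog",
    connection_name=pg.name,
    comment="Foreign catalog backed by the postgres_conn connection")
```
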
@@ -38,6 +40,8 @@ def __init__(__self__, *, """ if comment is not None: pulumi.set(__self__, "comment", comment) + if connection_name is not None: + pulumi.set(__self__, "connection_name", connection_name) if force_destroy is not None: pulumi.set(__self__, "force_destroy", force_destroy) if isolation_mode is not None: @@ -69,6 +73,18 @@ def comment(self) -> Optional[pulumi.Input[str]]: def comment(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "comment", value) + @property + @pulumi.getter(name="connectionName") + def connection_name(self) -> Optional[pulumi.Input[str]]: + """ + For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + """ + return pulumi.get(self, "connection_name") + + @connection_name.setter + def connection_name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "connection_name", value) + @property @pulumi.getter(name="forceDestroy") def force_destroy(self) -> Optional[pulumi.Input[bool]]: @@ -179,6 +195,7 @@ def storage_root(self, value: Optional[pulumi.Input[str]]): class _CatalogState: def __init__(__self__, *, comment: Optional[pulumi.Input[str]] = None, + connection_name: Optional[pulumi.Input[str]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, isolation_mode: Optional[pulumi.Input[str]] = None, metastore_id: Optional[pulumi.Input[str]] = None, @@ -191,6 +208,7 @@ def __init__(__self__, *, """ Input properties used for looking up and filtering Catalog resources. :param pulumi.Input[str] comment: User-supplied free-form text. + :param pulumi.Input[str] connection_name: For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. :param pulumi.Input[bool] force_destroy: Delete catalog regardless of its contents. :param pulumi.Input[str] isolation_mode: Whether the catalog is accessible from all workspaces or a specific set of workspaces. Can be `ISOLATED` or `OPEN`. Setting the catalog to `ISOLATED` will automatically allow access from the current workspace. :param pulumi.Input[str] name: Name of Catalog relative to parent metastore. @@ -202,6 +220,8 @@ def __init__(__self__, *, """ if comment is not None: pulumi.set(__self__, "comment", comment) + if connection_name is not None: + pulumi.set(__self__, "connection_name", connection_name) if force_destroy is not None: pulumi.set(__self__, "force_destroy", force_destroy) if isolation_mode is not None: @@ -233,6 +253,18 @@ def comment(self) -> Optional[pulumi.Input[str]]: def comment(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "comment", value) + @property + @pulumi.getter(name="connectionName") + def connection_name(self) -> Optional[pulumi.Input[str]]: + """ + For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. 
+ """ + return pulumi.get(self, "connection_name") + + @connection_name.setter + def connection_name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "connection_name", value) + @property @pulumi.getter(name="forceDestroy") def force_destroy(self) -> Optional[pulumi.Input[bool]]: @@ -345,6 +377,7 @@ def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, comment: Optional[pulumi.Input[str]] = None, + connection_name: Optional[pulumi.Input[str]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, isolation_mode: Optional[pulumi.Input[str]] = None, metastore_id: Optional[pulumi.Input[str]] = None, @@ -388,6 +421,7 @@ def __init__(__self__, :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] comment: User-supplied free-form text. + :param pulumi.Input[str] connection_name: For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. :param pulumi.Input[bool] force_destroy: Delete catalog regardless of its contents. :param pulumi.Input[str] isolation_mode: Whether the catalog is accessible from all workspaces or a specific set of workspaces. Can be `ISOLATED` or `OPEN`. Setting the catalog to `ISOLATED` will automatically allow access from the current workspace. :param pulumi.Input[str] name: Name of Catalog relative to parent metastore. @@ -449,6 +483,7 @@ def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, comment: Optional[pulumi.Input[str]] = None, + connection_name: Optional[pulumi.Input[str]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, isolation_mode: Optional[pulumi.Input[str]] = None, metastore_id: Optional[pulumi.Input[str]] = None, @@ -468,6 +503,7 @@ def _internal_init(__self__, __props__ = CatalogArgs.__new__(CatalogArgs) __props__.__dict__["comment"] = comment + __props__.__dict__["connection_name"] = connection_name __props__.__dict__["force_destroy"] = force_destroy __props__.__dict__["isolation_mode"] = isolation_mode __props__.__dict__["metastore_id"] = metastore_id @@ -488,6 +524,7 @@ def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, comment: Optional[pulumi.Input[str]] = None, + connection_name: Optional[pulumi.Input[str]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, isolation_mode: Optional[pulumi.Input[str]] = None, metastore_id: Optional[pulumi.Input[str]] = None, @@ -505,6 +542,7 @@ def get(resource_name: str, :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] comment: User-supplied free-form text. + :param pulumi.Input[str] connection_name: For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. :param pulumi.Input[bool] force_destroy: Delete catalog regardless of its contents. :param pulumi.Input[str] isolation_mode: Whether the catalog is accessible from all workspaces or a specific set of workspaces. Can be `ISOLATED` or `OPEN`. Setting the catalog to `ISOLATED` will automatically allow access from the current workspace. :param pulumi.Input[str] name: Name of Catalog relative to parent metastore. 
@@ -519,6 +557,7 @@ def get(resource_name: str, __props__ = _CatalogState.__new__(_CatalogState) __props__.__dict__["comment"] = comment + __props__.__dict__["connection_name"] = connection_name __props__.__dict__["force_destroy"] = force_destroy __props__.__dict__["isolation_mode"] = isolation_mode __props__.__dict__["metastore_id"] = metastore_id @@ -538,6 +577,14 @@ def comment(self) -> pulumi.Output[Optional[str]]: """ return pulumi.get(self, "comment") + @property + @pulumi.getter(name="connectionName") + def connection_name(self) -> pulumi.Output[Optional[str]]: + """ + For Foreign Catalogs: the name of the connection to an external data source. Changes forces creation of a new resource. + """ + return pulumi.get(self, "connection_name") + @property @pulumi.getter(name="forceDestroy") def force_destroy(self) -> pulumi.Output[Optional[bool]]: diff --git a/sdk/python/pulumi_databricks/connection.py b/sdk/python/pulumi_databricks/connection.py new file mode 100644 index 00000000..56d4bdd5 --- /dev/null +++ b/sdk/python/pulumi_databricks/connection.py @@ -0,0 +1,518 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +from . import _utilities + +__all__ = ['ConnectionArgs', 'Connection'] + +@pulumi.input_type +class ConnectionArgs: + def __init__(__self__, *, + connection_type: pulumi.Input[str], + options: pulumi.Input[Mapping[str, Any]], + comment: Optional[pulumi.Input[str]] = None, + metastore_id: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + owner: Optional[pulumi.Input[str]] = None, + properties: Optional[pulumi.Input[Mapping[str, Any]]] = None, + read_only: Optional[pulumi.Input[bool]] = None): + """ + The set of arguments for constructing a Connection resource. + :param pulumi.Input[str] connection_type: Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + :param pulumi.Input[Mapping[str, Any]] options: The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + :param pulumi.Input[str] comment: Free-form text. + :param pulumi.Input[str] name: Name of the Connection. + :param pulumi.Input[str] owner: Name of the connection owner. + :param pulumi.Input[Mapping[str, Any]] properties: Free-form connection properties. + """ + pulumi.set(__self__, "connection_type", connection_type) + pulumi.set(__self__, "options", options) + if comment is not None: + pulumi.set(__self__, "comment", comment) + if metastore_id is not None: + pulumi.set(__self__, "metastore_id", metastore_id) + if name is not None: + pulumi.set(__self__, "name", name) + if owner is not None: + pulumi.set(__self__, "owner", owner) + if properties is not None: + pulumi.set(__self__, "properties", properties) + if read_only is not None: + pulumi.set(__self__, "read_only", read_only) + + @property + @pulumi.getter(name="connectionType") + def connection_type(self) -> pulumi.Input[str]: + """ + Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. 
[Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + """ + return pulumi.get(self, "connection_type") + + @connection_type.setter + def connection_type(self, value: pulumi.Input[str]): + pulumi.set(self, "connection_type", value) + + @property + @pulumi.getter + def options(self) -> pulumi.Input[Mapping[str, Any]]: + """ + The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + """ + return pulumi.get(self, "options") + + @options.setter + def options(self, value: pulumi.Input[Mapping[str, Any]]): + pulumi.set(self, "options", value) + + @property + @pulumi.getter + def comment(self) -> Optional[pulumi.Input[str]]: + """ + Free-form text. + """ + return pulumi.get(self, "comment") + + @comment.setter + def comment(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "comment", value) + + @property + @pulumi.getter(name="metastoreId") + def metastore_id(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "metastore_id") + + @metastore_id.setter + def metastore_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "metastore_id", value) + + @property + @pulumi.getter + def name(self) -> Optional[pulumi.Input[str]]: + """ + Name of the Connection. + """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "name", value) + + @property + @pulumi.getter + def owner(self) -> Optional[pulumi.Input[str]]: + """ + Name of the connection owner. + """ + return pulumi.get(self, "owner") + + @owner.setter + def owner(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "owner", value) + + @property + @pulumi.getter + def properties(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: + """ + Free-form connection properties. + """ + return pulumi.get(self, "properties") + + @properties.setter + def properties(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): + pulumi.set(self, "properties", value) + + @property + @pulumi.getter(name="readOnly") + def read_only(self) -> Optional[pulumi.Input[bool]]: + return pulumi.get(self, "read_only") + + @read_only.setter + def read_only(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "read_only", value) + + +@pulumi.input_type +class _ConnectionState: + def __init__(__self__, *, + comment: Optional[pulumi.Input[str]] = None, + connection_type: Optional[pulumi.Input[str]] = None, + metastore_id: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + options: Optional[pulumi.Input[Mapping[str, Any]]] = None, + owner: Optional[pulumi.Input[str]] = None, + properties: Optional[pulumi.Input[Mapping[str, Any]]] = None, + read_only: Optional[pulumi.Input[bool]] = None): + """ + Input properties used for looking up and filtering Connection resources. + :param pulumi.Input[str] comment: Free-form text. + :param pulumi.Input[str] connection_type: Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + :param pulumi.Input[str] name: Name of the Connection. + :param pulumi.Input[Mapping[str, Any]] options: The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + :param pulumi.Input[str] owner: Name of the connection owner. 
+ :param pulumi.Input[Mapping[str, Any]] properties: Free-form connection properties. + """ + if comment is not None: + pulumi.set(__self__, "comment", comment) + if connection_type is not None: + pulumi.set(__self__, "connection_type", connection_type) + if metastore_id is not None: + pulumi.set(__self__, "metastore_id", metastore_id) + if name is not None: + pulumi.set(__self__, "name", name) + if options is not None: + pulumi.set(__self__, "options", options) + if owner is not None: + pulumi.set(__self__, "owner", owner) + if properties is not None: + pulumi.set(__self__, "properties", properties) + if read_only is not None: + pulumi.set(__self__, "read_only", read_only) + + @property + @pulumi.getter + def comment(self) -> Optional[pulumi.Input[str]]: + """ + Free-form text. + """ + return pulumi.get(self, "comment") + + @comment.setter + def comment(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "comment", value) + + @property + @pulumi.getter(name="connectionType") + def connection_type(self) -> Optional[pulumi.Input[str]]: + """ + Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + """ + return pulumi.get(self, "connection_type") + + @connection_type.setter + def connection_type(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "connection_type", value) + + @property + @pulumi.getter(name="metastoreId") + def metastore_id(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "metastore_id") + + @metastore_id.setter + def metastore_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "metastore_id", value) + + @property + @pulumi.getter + def name(self) -> Optional[pulumi.Input[str]]: + """ + Name of the Connection. + """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "name", value) + + @property + @pulumi.getter + def options(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: + """ + The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + """ + return pulumi.get(self, "options") + + @options.setter + def options(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): + pulumi.set(self, "options", value) + + @property + @pulumi.getter + def owner(self) -> Optional[pulumi.Input[str]]: + """ + Name of the connection owner. + """ + return pulumi.get(self, "owner") + + @owner.setter + def owner(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "owner", value) + + @property + @pulumi.getter + def properties(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: + """ + Free-form connection properties. 
+ """ + return pulumi.get(self, "properties") + + @properties.setter + def properties(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): + pulumi.set(self, "properties", value) + + @property + @pulumi.getter(name="readOnly") + def read_only(self) -> Optional[pulumi.Input[bool]]: + return pulumi.get(self, "read_only") + + @read_only.setter + def read_only(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "read_only", value) + + +class Connection(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + comment: Optional[pulumi.Input[str]] = None, + connection_type: Optional[pulumi.Input[str]] = None, + metastore_id: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + options: Optional[pulumi.Input[Mapping[str, Any]]] = None, + owner: Optional[pulumi.Input[str]] = None, + properties: Optional[pulumi.Input[Mapping[str, Any]]] = None, + read_only: Optional[pulumi.Input[bool]] = None, + __props__=None): + """ + Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: + + - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. + - A foreign catalog + + This resource manages connections in Unity Catalog + + ## Example Usage + + ```python + import pulumi + import pulumi_databricks as databricks + + mysql = databricks.Connection("mysql", + comment="this is a connection to mysql db", + connection_type="MYSQL", + options={ + "host": "test.mysql.database.azure.com", + "password": "password", + "port": "3306", + "user": "user", + }, + properties={ + "purpose": "testing", + }) + ``` + + ## Import + + This resource can be imported by `name` bash + + ```sh + $ pulumi import databricks:index/connection:Connection this + ``` + + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] comment: Free-form text. + :param pulumi.Input[str] connection_type: Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + :param pulumi.Input[str] name: Name of the Connection. + :param pulumi.Input[Mapping[str, Any]] options: The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + :param pulumi.Input[str] owner: Name of the connection owner. + :param pulumi.Input[Mapping[str, Any]] properties: Free-form connection properties. + """ + ... + @overload + def __init__(__self__, + resource_name: str, + args: ConnectionArgs, + opts: Optional[pulumi.ResourceOptions] = None): + """ + Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: + + - A connection, a securable object in Unity Catalog that specifies a path and credentials for accessing an external database system. 
+ - A foreign catalog + + This resource manages connections in Unity Catalog + + ## Example Usage + + ```python + import pulumi + import pulumi_databricks as databricks + + mysql = databricks.Connection("mysql", + comment="this is a connection to mysql db", + connection_type="MYSQL", + options={ + "host": "test.mysql.database.azure.com", + "password": "password", + "port": "3306", + "user": "user", + }, + properties={ + "purpose": "testing", + }) + ``` + + ## Import + + This resource can be imported by `name` bash + + ```sh + $ pulumi import databricks:index/connection:Connection this + ``` + + :param str resource_name: The name of the resource. + :param ConnectionArgs args: The arguments to use to populate this resource's properties. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... + def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(ConnectionArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + comment: Optional[pulumi.Input[str]] = None, + connection_type: Optional[pulumi.Input[str]] = None, + metastore_id: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + options: Optional[pulumi.Input[Mapping[str, Any]]] = None, + owner: Optional[pulumi.Input[str]] = None, + properties: Optional[pulumi.Input[Mapping[str, Any]]] = None, + read_only: Optional[pulumi.Input[bool]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = ConnectionArgs.__new__(ConnectionArgs) + + __props__.__dict__["comment"] = comment + if connection_type is None and not opts.urn: + raise TypeError("Missing required property 'connection_type'") + __props__.__dict__["connection_type"] = connection_type + __props__.__dict__["metastore_id"] = metastore_id + __props__.__dict__["name"] = name + if options is None and not opts.urn: + raise TypeError("Missing required property 'options'") + __props__.__dict__["options"] = None if options is None else pulumi.Output.secret(options) + __props__.__dict__["owner"] = owner + __props__.__dict__["properties"] = properties + __props__.__dict__["read_only"] = read_only + secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["options"]) + opts = pulumi.ResourceOptions.merge(opts, secret_opts) + super(Connection, __self__).__init__( + 'databricks:index/connection:Connection', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = None, + comment: Optional[pulumi.Input[str]] = None, + connection_type: Optional[pulumi.Input[str]] = None, + metastore_id: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + options: Optional[pulumi.Input[Mapping[str, Any]]] = None, + owner: Optional[pulumi.Input[str]] = None, + properties: Optional[pulumi.Input[Mapping[str, Any]]] = None, + read_only: 
Optional[pulumi.Input[bool]] = None) -> 'Connection': + """ + Get an existing Connection resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] comment: Free-form text. + :param pulumi.Input[str] connection_type: Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + :param pulumi.Input[str] name: Name of the Connection. + :param pulumi.Input[Mapping[str, Any]] options: The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + :param pulumi.Input[str] owner: Name of the connection owner. + :param pulumi.Input[Mapping[str, Any]] properties: Free-form connection properties. + """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = _ConnectionState.__new__(_ConnectionState) + + __props__.__dict__["comment"] = comment + __props__.__dict__["connection_type"] = connection_type + __props__.__dict__["metastore_id"] = metastore_id + __props__.__dict__["name"] = name + __props__.__dict__["options"] = options + __props__.__dict__["owner"] = owner + __props__.__dict__["properties"] = properties + __props__.__dict__["read_only"] = read_only + return Connection(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter + def comment(self) -> pulumi.Output[Optional[str]]: + """ + Free-form text. + """ + return pulumi.get(self, "comment") + + @property + @pulumi.getter(name="connectionType") + def connection_type(self) -> pulumi.Output[str]: + """ + Connection type. `MYSQL` `POSTGRESQL` `SNOWFLAKE` `REDSHIFT` `SQLDW` `SQLSERVER` or `DATABRICKS` are supported. [Up-to-date list of connection type supported](https://docs.databricks.com/query-federation/index.html#supported-data-sources) + """ + return pulumi.get(self, "connection_type") + + @property + @pulumi.getter(name="metastoreId") + def metastore_id(self) -> pulumi.Output[str]: + return pulumi.get(self, "metastore_id") + + @property + @pulumi.getter + def name(self) -> pulumi.Output[str]: + """ + Name of the Connection. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter + def options(self) -> pulumi.Output[Mapping[str, Any]]: + """ + The key value of options required by the connection, e.g. `host`, `port`, `user` and `password`. + """ + return pulumi.get(self, "options") + + @property + @pulumi.getter + def owner(self) -> pulumi.Output[Optional[str]]: + """ + Name of the connection owner. + """ + return pulumi.get(self, "owner") + + @property + @pulumi.getter + def properties(self) -> pulumi.Output[Optional[Mapping[str, Any]]]: + """ + Free-form connection properties. 
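The static `Connection.get` shown here looks up an existing connection by provider ID; per the import note, connections are identified by name. A short sketch under that assumption (the connection name below is a placeholder):

```python
import pulumi
import pulumi_databricks as databricks

# Look up an existing connection; the ID is assumed to be the connection name.
existing = databricks.Connection.get("existing", "my-existing-connection")

# Attributes of the looked-up connection become resource outputs.
pulumi.export("connection_type", existing.connection_type)
pulumi.export("connection_owner", existing.owner)
```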
+ """ + return pulumi.get(self, "properties") + + @property + @pulumi.getter(name="readOnly") + def read_only(self) -> pulumi.Output[bool]: + return pulumi.get(self, "read_only") + diff --git a/sdk/python/pulumi_databricks/external_location.py b/sdk/python/pulumi_databricks/external_location.py index 9064f94b..98076033 100644 --- a/sdk/python/pulumi_databricks/external_location.py +++ b/sdk/python/pulumi_databricks/external_location.py @@ -8,6 +8,8 @@ import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities +from . import outputs +from ._inputs import * __all__ = ['ExternalLocationArgs', 'ExternalLocation'] @@ -16,8 +18,11 @@ class ExternalLocationArgs: def __init__(__self__, *, credential_name: pulumi.Input[str], url: pulumi.Input[str], + access_point: Optional[pulumi.Input[str]] = None, comment: Optional[pulumi.Input[str]] = None, + encryption_details: Optional[pulumi.Input['ExternalLocationEncryptionDetailsArgs']] = None, force_destroy: Optional[pulumi.Input[bool]] = None, + force_update: Optional[pulumi.Input[bool]] = None, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, @@ -25,21 +30,30 @@ def __init__(__self__, *, skip_validation: Optional[pulumi.Input[bool]] = None): """ The set of arguments for constructing a ExternalLocation resource. - :param pulumi.Input[str] credential_name: Name of the StorageCredential to use with this External Location. + :param pulumi.Input[str] credential_name: Name of the StorageCredential to use with this external location. :param pulumi.Input[str] url: Path URL in cloud storage, of the form: `s3://[bucket-host]/[bucket-dir]` (AWS), `abfss://[user]@[host]/[path]` (Azure), `gs://[bucket-host]/[bucket-dir]` (GCP). + :param pulumi.Input[str] access_point: The ARN of the s3 access point to use with the external location (AWS). :param pulumi.Input[str] comment: User-supplied free-form text. + :param pulumi.Input['ExternalLocationEncryptionDetailsArgs'] encryption_details: The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). :param pulumi.Input[bool] force_destroy: Destroy external location regardless of its dependents. + :param pulumi.Input[bool] force_update: Update external location regardless of its dependents. :param pulumi.Input[str] name: Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. - :param pulumi.Input[str] owner: Username/groupname/sp application_id of the external Location owner. + :param pulumi.Input[str] owner: Username/groupname/sp application_id of the external location owner. :param pulumi.Input[bool] read_only: Indicates whether the external location is read-only. 
:param pulumi.Input[bool] skip_validation: Suppress validation errors if any & force save the external location """ pulumi.set(__self__, "credential_name", credential_name) pulumi.set(__self__, "url", url) + if access_point is not None: + pulumi.set(__self__, "access_point", access_point) if comment is not None: pulumi.set(__self__, "comment", comment) + if encryption_details is not None: + pulumi.set(__self__, "encryption_details", encryption_details) if force_destroy is not None: pulumi.set(__self__, "force_destroy", force_destroy) + if force_update is not None: + pulumi.set(__self__, "force_update", force_update) if metastore_id is not None: pulumi.set(__self__, "metastore_id", metastore_id) if name is not None: @@ -55,7 +69,7 @@ def __init__(__self__, *, @pulumi.getter(name="credentialName") def credential_name(self) -> pulumi.Input[str]: """ - Name of the StorageCredential to use with this External Location. + Name of the StorageCredential to use with this external location. """ return pulumi.get(self, "credential_name") @@ -75,6 +89,18 @@ def url(self) -> pulumi.Input[str]: def url(self, value: pulumi.Input[str]): pulumi.set(self, "url", value) + @property + @pulumi.getter(name="accessPoint") + def access_point(self) -> Optional[pulumi.Input[str]]: + """ + The ARN of the s3 access point to use with the external location (AWS). + """ + return pulumi.get(self, "access_point") + + @access_point.setter + def access_point(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "access_point", value) + @property @pulumi.getter def comment(self) -> Optional[pulumi.Input[str]]: @@ -87,6 +113,18 @@ def comment(self) -> Optional[pulumi.Input[str]]: def comment(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "comment", value) + @property + @pulumi.getter(name="encryptionDetails") + def encryption_details(self) -> Optional[pulumi.Input['ExternalLocationEncryptionDetailsArgs']]: + """ + The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + """ + return pulumi.get(self, "encryption_details") + + @encryption_details.setter + def encryption_details(self, value: Optional[pulumi.Input['ExternalLocationEncryptionDetailsArgs']]): + pulumi.set(self, "encryption_details", value) + @property @pulumi.getter(name="forceDestroy") def force_destroy(self) -> Optional[pulumi.Input[bool]]: @@ -99,6 +137,18 @@ def force_destroy(self) -> Optional[pulumi.Input[bool]]: def force_destroy(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "force_destroy", value) + @property + @pulumi.getter(name="forceUpdate") + def force_update(self) -> Optional[pulumi.Input[bool]]: + """ + Update external location regardless of its dependents. + """ + return pulumi.get(self, "force_update") + + @force_update.setter + def force_update(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "force_update", value) + @property @pulumi.getter(name="metastoreId") def metastore_id(self) -> Optional[pulumi.Input[str]]: @@ -124,7 +174,7 @@ def name(self, value: Optional[pulumi.Input[str]]): @pulumi.getter def owner(self) -> Optional[pulumi.Input[str]]: """ - Username/groupname/sp application_id of the external Location owner. + Username/groupname/sp application_id of the external location owner. 
""" return pulumi.get(self, "owner") @@ -160,9 +210,12 @@ def skip_validation(self, value: Optional[pulumi.Input[bool]]): @pulumi.input_type class _ExternalLocationState: def __init__(__self__, *, + access_point: Optional[pulumi.Input[str]] = None, comment: Optional[pulumi.Input[str]] = None, credential_name: Optional[pulumi.Input[str]] = None, + encryption_details: Optional[pulumi.Input['ExternalLocationEncryptionDetailsArgs']] = None, force_destroy: Optional[pulumi.Input[bool]] = None, + force_update: Optional[pulumi.Input[bool]] = None, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, @@ -171,21 +224,30 @@ def __init__(__self__, *, url: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering ExternalLocation resources. + :param pulumi.Input[str] access_point: The ARN of the s3 access point to use with the external location (AWS). :param pulumi.Input[str] comment: User-supplied free-form text. - :param pulumi.Input[str] credential_name: Name of the StorageCredential to use with this External Location. + :param pulumi.Input[str] credential_name: Name of the StorageCredential to use with this external location. + :param pulumi.Input['ExternalLocationEncryptionDetailsArgs'] encryption_details: The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). :param pulumi.Input[bool] force_destroy: Destroy external location regardless of its dependents. + :param pulumi.Input[bool] force_update: Update external location regardless of its dependents. :param pulumi.Input[str] name: Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. - :param pulumi.Input[str] owner: Username/groupname/sp application_id of the external Location owner. + :param pulumi.Input[str] owner: Username/groupname/sp application_id of the external location owner. :param pulumi.Input[bool] read_only: Indicates whether the external location is read-only. :param pulumi.Input[bool] skip_validation: Suppress validation errors if any & force save the external location :param pulumi.Input[str] url: Path URL in cloud storage, of the form: `s3://[bucket-host]/[bucket-dir]` (AWS), `abfss://[user]@[host]/[path]` (Azure), `gs://[bucket-host]/[bucket-dir]` (GCP). """ + if access_point is not None: + pulumi.set(__self__, "access_point", access_point) if comment is not None: pulumi.set(__self__, "comment", comment) if credential_name is not None: pulumi.set(__self__, "credential_name", credential_name) + if encryption_details is not None: + pulumi.set(__self__, "encryption_details", encryption_details) if force_destroy is not None: pulumi.set(__self__, "force_destroy", force_destroy) + if force_update is not None: + pulumi.set(__self__, "force_update", force_update) if metastore_id is not None: pulumi.set(__self__, "metastore_id", metastore_id) if name is not None: @@ -199,6 +261,18 @@ def __init__(__self__, *, if url is not None: pulumi.set(__self__, "url", url) + @property + @pulumi.getter(name="accessPoint") + def access_point(self) -> Optional[pulumi.Input[str]]: + """ + The ARN of the s3 access point to use with the external location (AWS). 
+ """ + return pulumi.get(self, "access_point") + + @access_point.setter + def access_point(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "access_point", value) + @property @pulumi.getter def comment(self) -> Optional[pulumi.Input[str]]: @@ -215,7 +289,7 @@ def comment(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="credentialName") def credential_name(self) -> Optional[pulumi.Input[str]]: """ - Name of the StorageCredential to use with this External Location. + Name of the StorageCredential to use with this external location. """ return pulumi.get(self, "credential_name") @@ -223,6 +297,18 @@ def credential_name(self) -> Optional[pulumi.Input[str]]: def credential_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "credential_name", value) + @property + @pulumi.getter(name="encryptionDetails") + def encryption_details(self) -> Optional[pulumi.Input['ExternalLocationEncryptionDetailsArgs']]: + """ + The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). + """ + return pulumi.get(self, "encryption_details") + + @encryption_details.setter + def encryption_details(self, value: Optional[pulumi.Input['ExternalLocationEncryptionDetailsArgs']]): + pulumi.set(self, "encryption_details", value) + @property @pulumi.getter(name="forceDestroy") def force_destroy(self) -> Optional[pulumi.Input[bool]]: @@ -235,6 +321,18 @@ def force_destroy(self) -> Optional[pulumi.Input[bool]]: def force_destroy(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "force_destroy", value) + @property + @pulumi.getter(name="forceUpdate") + def force_update(self) -> Optional[pulumi.Input[bool]]: + """ + Update external location regardless of its dependents. + """ + return pulumi.get(self, "force_update") + + @force_update.setter + def force_update(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "force_update", value) + @property @pulumi.getter(name="metastoreId") def metastore_id(self) -> Optional[pulumi.Input[str]]: @@ -260,7 +358,7 @@ def name(self, value: Optional[pulumi.Input[str]]): @pulumi.getter def owner(self) -> Optional[pulumi.Input[str]]: """ - Username/groupname/sp application_id of the external Location owner. + Username/groupname/sp application_id of the external location owner. """ return pulumi.get(self, "owner") @@ -310,9 +408,12 @@ class ExternalLocation(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, + access_point: Optional[pulumi.Input[str]] = None, comment: Optional[pulumi.Input[str]] = None, credential_name: Optional[pulumi.Input[str]] = None, + encryption_details: Optional[pulumi.Input[pulumi.InputType['ExternalLocationEncryptionDetailsArgs']]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, + force_update: Optional[pulumi.Input[bool]] = None, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, @@ -336,11 +437,14 @@ def __init__(__self__, :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] access_point: The ARN of the s3 access point to use with the external location (AWS). :param pulumi.Input[str] comment: User-supplied free-form text. - :param pulumi.Input[str] credential_name: Name of the StorageCredential to use with this External Location. 
+ :param pulumi.Input[str] credential_name: Name of the StorageCredential to use with this external location. + :param pulumi.Input[pulumi.InputType['ExternalLocationEncryptionDetailsArgs']] encryption_details: The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). :param pulumi.Input[bool] force_destroy: Destroy external location regardless of its dependents. + :param pulumi.Input[bool] force_update: Update external location regardless of its dependents. :param pulumi.Input[str] name: Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. - :param pulumi.Input[str] owner: Username/groupname/sp application_id of the external Location owner. + :param pulumi.Input[str] owner: Username/groupname/sp application_id of the external location owner. :param pulumi.Input[bool] read_only: Indicates whether the external location is read-only. :param pulumi.Input[bool] skip_validation: Suppress validation errors if any & force save the external location :param pulumi.Input[str] url: Path URL in cloud storage, of the form: `s3://[bucket-host]/[bucket-dir]` (AWS), `abfss://[user]@[host]/[path]` (Azure), `gs://[bucket-host]/[bucket-dir]` (GCP). @@ -380,9 +484,12 @@ def __init__(__self__, resource_name: str, *args, **kwargs): def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, + access_point: Optional[pulumi.Input[str]] = None, comment: Optional[pulumi.Input[str]] = None, credential_name: Optional[pulumi.Input[str]] = None, + encryption_details: Optional[pulumi.Input[pulumi.InputType['ExternalLocationEncryptionDetailsArgs']]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, + force_update: Optional[pulumi.Input[bool]] = None, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, @@ -398,11 +505,14 @@ def _internal_init(__self__, raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = ExternalLocationArgs.__new__(ExternalLocationArgs) + __props__.__dict__["access_point"] = access_point __props__.__dict__["comment"] = comment if credential_name is None and not opts.urn: raise TypeError("Missing required property 'credential_name'") __props__.__dict__["credential_name"] = credential_name + __props__.__dict__["encryption_details"] = encryption_details __props__.__dict__["force_destroy"] = force_destroy + __props__.__dict__["force_update"] = force_update __props__.__dict__["metastore_id"] = metastore_id __props__.__dict__["name"] = name __props__.__dict__["owner"] = owner @@ -421,9 +531,12 @@ def _internal_init(__self__, def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, + access_point: Optional[pulumi.Input[str]] = None, comment: Optional[pulumi.Input[str]] = None, credential_name: Optional[pulumi.Input[str]] = None, + encryption_details: Optional[pulumi.Input[pulumi.InputType['ExternalLocationEncryptionDetailsArgs']]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, + force_update: Optional[pulumi.Input[bool]] = None, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, @@ -437,11 +550,14 @@ def get(resource_name: str, :param str resource_name: The unique name of the resulting resource. 
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] access_point: The ARN of the s3 access point to use with the external location (AWS). :param pulumi.Input[str] comment: User-supplied free-form text. - :param pulumi.Input[str] credential_name: Name of the StorageCredential to use with this External Location. + :param pulumi.Input[str] credential_name: Name of the StorageCredential to use with this external location. + :param pulumi.Input[pulumi.InputType['ExternalLocationEncryptionDetailsArgs']] encryption_details: The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). :param pulumi.Input[bool] force_destroy: Destroy external location regardless of its dependents. + :param pulumi.Input[bool] force_update: Update external location regardless of its dependents. :param pulumi.Input[str] name: Name of External Location, which must be unique within the databricks_metastore. Change forces creation of a new resource. - :param pulumi.Input[str] owner: Username/groupname/sp application_id of the external Location owner. + :param pulumi.Input[str] owner: Username/groupname/sp application_id of the external location owner. :param pulumi.Input[bool] read_only: Indicates whether the external location is read-only. :param pulumi.Input[bool] skip_validation: Suppress validation errors if any & force save the external location :param pulumi.Input[str] url: Path URL in cloud storage, of the form: `s3://[bucket-host]/[bucket-dir]` (AWS), `abfss://[user]@[host]/[path]` (Azure), `gs://[bucket-host]/[bucket-dir]` (GCP). @@ -450,9 +566,12 @@ def get(resource_name: str, __props__ = _ExternalLocationState.__new__(_ExternalLocationState) + __props__.__dict__["access_point"] = access_point __props__.__dict__["comment"] = comment __props__.__dict__["credential_name"] = credential_name + __props__.__dict__["encryption_details"] = encryption_details __props__.__dict__["force_destroy"] = force_destroy + __props__.__dict__["force_update"] = force_update __props__.__dict__["metastore_id"] = metastore_id __props__.__dict__["name"] = name __props__.__dict__["owner"] = owner @@ -461,6 +580,14 @@ def get(resource_name: str, __props__.__dict__["url"] = url return ExternalLocation(resource_name, opts=opts, __props__=__props__) + @property + @pulumi.getter(name="accessPoint") + def access_point(self) -> pulumi.Output[Optional[str]]: + """ + The ARN of the s3 access point to use with the external location (AWS). + """ + return pulumi.get(self, "access_point") + @property @pulumi.getter def comment(self) -> pulumi.Output[Optional[str]]: @@ -473,10 +600,18 @@ def comment(self) -> pulumi.Output[Optional[str]]: @pulumi.getter(name="credentialName") def credential_name(self) -> pulumi.Output[str]: """ - Name of the StorageCredential to use with this External Location. + Name of the StorageCredential to use with this external location. """ return pulumi.get(self, "credential_name") + @property + @pulumi.getter(name="encryptionDetails") + def encryption_details(self) -> pulumi.Output[Optional['outputs.ExternalLocationEncryptionDetails']]: + """ + The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). 
+ """ + return pulumi.get(self, "encryption_details") + @property @pulumi.getter(name="forceDestroy") def force_destroy(self) -> pulumi.Output[Optional[bool]]: @@ -485,6 +620,14 @@ def force_destroy(self) -> pulumi.Output[Optional[bool]]: """ return pulumi.get(self, "force_destroy") + @property + @pulumi.getter(name="forceUpdate") + def force_update(self) -> pulumi.Output[Optional[bool]]: + """ + Update external location regardless of its dependents. + """ + return pulumi.get(self, "force_update") + @property @pulumi.getter(name="metastoreId") def metastore_id(self) -> pulumi.Output[str]: @@ -502,7 +645,7 @@ def name(self) -> pulumi.Output[str]: @pulumi.getter def owner(self) -> pulumi.Output[str]: """ - Username/groupname/sp application_id of the external Location owner. + Username/groupname/sp application_id of the external location owner. """ return pulumi.get(self, "owner") diff --git a/sdk/python/pulumi_databricks/get_current_user.py b/sdk/python/pulumi_databricks/get_current_user.py index 343692a4..9d7396f3 100644 --- a/sdk/python/pulumi_databricks/get_current_user.py +++ b/sdk/python/pulumi_databricks/get_current_user.py @@ -20,7 +20,10 @@ class GetCurrentUserResult: """ A collection of values returned by getCurrentUser. """ - def __init__(__self__, alphanumeric=None, external_id=None, home=None, id=None, repos=None, user_name=None, workspace_url=None): + def __init__(__self__, acl_principal_id=None, alphanumeric=None, external_id=None, home=None, id=None, repos=None, user_name=None, workspace_url=None): + if acl_principal_id and not isinstance(acl_principal_id, str): + raise TypeError("Expected argument 'acl_principal_id' to be a str") + pulumi.set(__self__, "acl_principal_id", acl_principal_id) if alphanumeric and not isinstance(alphanumeric, str): raise TypeError("Expected argument 'alphanumeric' to be a str") pulumi.set(__self__, "alphanumeric", alphanumeric) @@ -43,6 +46,11 @@ def __init__(__self__, alphanumeric=None, external_id=None, home=None, id=None, raise TypeError("Expected argument 'workspace_url' to be a str") pulumi.set(__self__, "workspace_url", workspace_url) + @property + @pulumi.getter(name="aclPrincipalId") + def acl_principal_id(self) -> str: + return pulumi.get(self, "acl_principal_id") + @property @pulumi.getter def alphanumeric(self) -> str: @@ -88,6 +96,7 @@ def __await__(self): if False: yield self return GetCurrentUserResult( + acl_principal_id=self.acl_principal_id, alphanumeric=self.alphanumeric, external_id=self.external_id, home=self.home, @@ -110,6 +119,7 @@ def get_current_user(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGe * `repos` - Personal Repos location of the user, e.g. `/Repos/mr.foo@example.com`. * `alphanumeric` - Alphanumeric representation of user local name. e.g. `mr_foo`. * `workspace_url` - URL of the current Databricks workspace. + * `acl_principal_id` - identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com` if current user is user, or `servicePrincipals/00000000-0000-0000-0000-000000000000` if current user is service principal. 
## Related Resources @@ -125,6 +135,7 @@ def get_current_user(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGe __ret__ = pulumi.runtime.invoke('databricks:index/getCurrentUser:getCurrentUser', __args__, opts=opts, typ=GetCurrentUserResult).value return AwaitableGetCurrentUserResult( + acl_principal_id=pulumi.get(__ret__, 'acl_principal_id'), alphanumeric=pulumi.get(__ret__, 'alphanumeric'), external_id=pulumi.get(__ret__, 'external_id'), home=pulumi.get(__ret__, 'home'), diff --git a/sdk/python/pulumi_databricks/get_group.py b/sdk/python/pulumi_databricks/get_group.py index cb19f7c8..4c02b4ee 100644 --- a/sdk/python/pulumi_databricks/get_group.py +++ b/sdk/python/pulumi_databricks/get_group.py @@ -21,7 +21,10 @@ class GetGroupResult: """ A collection of values returned by getGroup. """ - def __init__(__self__, allow_cluster_create=None, allow_instance_pool_create=None, child_groups=None, databricks_sql_access=None, display_name=None, external_id=None, groups=None, id=None, instance_profiles=None, members=None, recursive=None, service_principals=None, users=None, workspace_access=None): + def __init__(__self__, acl_principal_id=None, allow_cluster_create=None, allow_instance_pool_create=None, child_groups=None, databricks_sql_access=None, display_name=None, external_id=None, groups=None, id=None, instance_profiles=None, members=None, recursive=None, service_principals=None, users=None, workspace_access=None): + if acl_principal_id and not isinstance(acl_principal_id, str): + raise TypeError("Expected argument 'acl_principal_id' to be a str") + pulumi.set(__self__, "acl_principal_id", acl_principal_id) if allow_cluster_create and not isinstance(allow_cluster_create, bool): raise TypeError("Expected argument 'allow_cluster_create' to be a bool") pulumi.set(__self__, "allow_cluster_create", allow_cluster_create) @@ -65,6 +68,14 @@ def __init__(__self__, allow_cluster_create=None, allow_instance_pool_create=Non raise TypeError("Expected argument 'workspace_access' to be a bool") pulumi.set(__self__, "workspace_access", workspace_access) + @property + @pulumi.getter(name="aclPrincipalId") + def acl_principal_id(self) -> str: + """ + identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. + """ + return pulumi.get(self, "acl_principal_id") + @property @pulumi.getter(name="allowClusterCreate") def allow_cluster_create(self) -> Optional[bool]: @@ -172,6 +183,7 @@ def __await__(self): if False: yield self return GetGroupResult( + acl_principal_id=self.acl_principal_id, allow_cluster_create=self.allow_cluster_create, allow_instance_pool_create=self.allow_instance_pool_create, child_groups=self.child_groups, @@ -188,7 +200,8 @@ def __await__(self): workspace_access=self.workspace_access) -def get_group(allow_cluster_create: Optional[bool] = None, +def get_group(acl_principal_id: Optional[str] = None, + allow_cluster_create: Optional[bool] = None, allow_instance_pool_create: Optional[bool] = None, child_groups: Optional[Sequence[str]] = None, databricks_sql_access: Optional[bool] = None, @@ -233,6 +246,7 @@ def get_group(allow_cluster_create: Optional[bool] = None, * User to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to Group within the workspace. + :param str acl_principal_id: identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. 
:param bool allow_cluster_create: True if group members can create clusters :param bool allow_instance_pool_create: True if group members can create instance pools :param Sequence[str] child_groups: Set of Group identifiers, that can be modified with GroupMember resource. @@ -245,6 +259,7 @@ def get_group(allow_cluster_create: Optional[bool] = None, :param Sequence[str] users: Set of User identifiers, that can be modified with GroupMember resource. """ __args__ = dict() + __args__['aclPrincipalId'] = acl_principal_id __args__['allowClusterCreate'] = allow_cluster_create __args__['allowInstancePoolCreate'] = allow_instance_pool_create __args__['childGroups'] = child_groups @@ -262,6 +277,7 @@ def get_group(allow_cluster_create: Optional[bool] = None, __ret__ = pulumi.runtime.invoke('databricks:index/getGroup:getGroup', __args__, opts=opts, typ=GetGroupResult).value return AwaitableGetGroupResult( + acl_principal_id=pulumi.get(__ret__, 'acl_principal_id'), allow_cluster_create=pulumi.get(__ret__, 'allow_cluster_create'), allow_instance_pool_create=pulumi.get(__ret__, 'allow_instance_pool_create'), child_groups=pulumi.get(__ret__, 'child_groups'), @@ -279,7 +295,8 @@ def get_group(allow_cluster_create: Optional[bool] = None, @_utilities.lift_output_func(get_group) -def get_group_output(allow_cluster_create: Optional[pulumi.Input[Optional[bool]]] = None, +def get_group_output(acl_principal_id: Optional[pulumi.Input[Optional[str]]] = None, + allow_cluster_create: Optional[pulumi.Input[Optional[bool]]] = None, allow_instance_pool_create: Optional[pulumi.Input[Optional[bool]]] = None, child_groups: Optional[pulumi.Input[Optional[Sequence[str]]]] = None, databricks_sql_access: Optional[pulumi.Input[Optional[bool]]] = None, @@ -324,6 +341,7 @@ def get_group_output(allow_cluster_create: Optional[pulumi.Input[Optional[bool]] * User to [manage users](https://docs.databricks.com/administration-guide/users-groups/users.html), that could be added to Group within the workspace. + :param str acl_principal_id: identifier for use in databricks_access_control_rule_set, e.g. `groups/Some Group`. :param bool allow_cluster_create: True if group members can create clusters :param bool allow_instance_pool_create: True if group members can create instance pools :param Sequence[str] child_groups: Set of Group identifiers, that can be modified with GroupMember resource. diff --git a/sdk/python/pulumi_databricks/get_service_principal.py b/sdk/python/pulumi_databricks/get_service_principal.py index 86c18ed7..69cef6eb 100644 --- a/sdk/python/pulumi_databricks/get_service_principal.py +++ b/sdk/python/pulumi_databricks/get_service_principal.py @@ -21,7 +21,10 @@ class GetServicePrincipalResult: """ A collection of values returned by getServicePrincipal. 
""" - def __init__(__self__, active=None, application_id=None, display_name=None, external_id=None, home=None, id=None, repos=None, sp_id=None): + def __init__(__self__, acl_principal_id=None, active=None, application_id=None, display_name=None, external_id=None, home=None, id=None, repos=None, sp_id=None): + if acl_principal_id and not isinstance(acl_principal_id, str): + raise TypeError("Expected argument 'acl_principal_id' to be a str") + pulumi.set(__self__, "acl_principal_id", acl_principal_id) if active and not isinstance(active, bool): raise TypeError("Expected argument 'active' to be a bool") pulumi.set(__self__, "active", active) @@ -47,6 +50,14 @@ def __init__(__self__, active=None, application_id=None, display_name=None, exte raise TypeError("Expected argument 'sp_id' to be a str") pulumi.set(__self__, "sp_id", sp_id) + @property + @pulumi.getter(name="aclPrincipalId") + def acl_principal_id(self) -> str: + """ + identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. + """ + return pulumi.get(self, "acl_principal_id") + @property @pulumi.getter def active(self) -> bool: @@ -112,6 +123,7 @@ def __await__(self): if False: yield self return GetServicePrincipalResult( + acl_principal_id=self.acl_principal_id, active=self.active, application_id=self.application_id, display_name=self.display_name, @@ -122,7 +134,8 @@ def __await__(self): sp_id=self.sp_id) -def get_service_principal(active: Optional[bool] = None, +def get_service_principal(acl_principal_id: Optional[str] = None, + active: Optional[bool] = None, application_id: Optional[str] = None, display_name: Optional[str] = None, external_id: Optional[str] = None, @@ -164,6 +177,7 @@ def get_service_principal(active: Optional[bool] = None, - databricks_service principal to manage service principals + :param str acl_principal_id: identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. :param bool active: Whether service principal is active or not. :param str application_id: ID of the service principal. The service principal must exist before this resource can be retrieved. :param str display_name: Display name of the service principal, e.g. `Foo SPN`. @@ -173,6 +187,7 @@ def get_service_principal(active: Optional[bool] = None, :param str repos: Repos location of the service principal, e.g. `/Repos/11111111-2222-3333-4444-555666777888`. 
""" __args__ = dict() + __args__['aclPrincipalId'] = acl_principal_id __args__['active'] = active __args__['applicationId'] = application_id __args__['displayName'] = display_name @@ -185,6 +200,7 @@ def get_service_principal(active: Optional[bool] = None, __ret__ = pulumi.runtime.invoke('databricks:index/getServicePrincipal:getServicePrincipal', __args__, opts=opts, typ=GetServicePrincipalResult).value return AwaitableGetServicePrincipalResult( + acl_principal_id=pulumi.get(__ret__, 'acl_principal_id'), active=pulumi.get(__ret__, 'active'), application_id=pulumi.get(__ret__, 'application_id'), display_name=pulumi.get(__ret__, 'display_name'), @@ -196,7 +212,8 @@ def get_service_principal(active: Optional[bool] = None, @_utilities.lift_output_func(get_service_principal) -def get_service_principal_output(active: Optional[pulumi.Input[Optional[bool]]] = None, +def get_service_principal_output(acl_principal_id: Optional[pulumi.Input[Optional[str]]] = None, + active: Optional[pulumi.Input[Optional[bool]]] = None, application_id: Optional[pulumi.Input[Optional[str]]] = None, display_name: Optional[pulumi.Input[Optional[str]]] = None, external_id: Optional[pulumi.Input[Optional[str]]] = None, @@ -238,6 +255,7 @@ def get_service_principal_output(active: Optional[pulumi.Input[Optional[bool]]] - databricks_service principal to manage service principals + :param str acl_principal_id: identifier for use in databricks_access_control_rule_set, e.g. `servicePrincipals/00000000-0000-0000-0000-000000000000`. :param bool active: Whether service principal is active or not. :param str application_id: ID of the service principal. The service principal must exist before this resource can be retrieved. :param str display_name: Display name of the service principal, e.g. `Foo SPN`. diff --git a/sdk/python/pulumi_databricks/get_user.py b/sdk/python/pulumi_databricks/get_user.py index 4079939a..9ee6dc50 100644 --- a/sdk/python/pulumi_databricks/get_user.py +++ b/sdk/python/pulumi_databricks/get_user.py @@ -21,7 +21,10 @@ class GetUserResult: """ A collection of values returned by getUser. """ - def __init__(__self__, alphanumeric=None, application_id=None, display_name=None, external_id=None, home=None, id=None, repos=None, user_id=None, user_name=None): + def __init__(__self__, acl_principal_id=None, alphanumeric=None, application_id=None, display_name=None, external_id=None, home=None, id=None, repos=None, user_id=None, user_name=None): + if acl_principal_id and not isinstance(acl_principal_id, str): + raise TypeError("Expected argument 'acl_principal_id' to be a str") + pulumi.set(__self__, "acl_principal_id", acl_principal_id) if alphanumeric and not isinstance(alphanumeric, str): raise TypeError("Expected argument 'alphanumeric' to be a str") pulumi.set(__self__, "alphanumeric", alphanumeric) @@ -50,6 +53,14 @@ def __init__(__self__, alphanumeric=None, application_id=None, display_name=None raise TypeError("Expected argument 'user_name' to be a str") pulumi.set(__self__, "user_name", user_name) + @property + @pulumi.getter(name="aclPrincipalId") + def acl_principal_id(self) -> str: + """ + identifier for use in databricks_access_control_rule_set, e.g. `users/mr.foo@example.com`. 
+ """ + return pulumi.get(self, "acl_principal_id") + @property @pulumi.getter def alphanumeric(self) -> str: @@ -123,6 +134,7 @@ def __await__(self): if False: yield self return GetUserResult( + acl_principal_id=self.acl_principal_id, alphanumeric=self.alphanumeric, application_id=self.application_id, display_name=self.display_name, @@ -181,6 +193,7 @@ def get_user(user_id: Optional[str] = None, __ret__ = pulumi.runtime.invoke('databricks:index/getUser:getUser', __args__, opts=opts, typ=GetUserResult).value return AwaitableGetUserResult( + acl_principal_id=pulumi.get(__ret__, 'acl_principal_id'), alphanumeric=pulumi.get(__ret__, 'alphanumeric'), application_id=pulumi.get(__ret__, 'application_id'), display_name=pulumi.get(__ret__, 'display_name'), diff --git a/sdk/python/pulumi_databricks/grants.py b/sdk/python/pulumi_databricks/grants.py index 1080afa3..4e626284 100644 --- a/sdk/python/pulumi_databricks/grants.py +++ b/sdk/python/pulumi_databricks/grants.py @@ -19,6 +19,7 @@ def __init__(__self__, *, grants: pulumi.Input[Sequence[pulumi.Input['GrantsGrantArgs']]], catalog: Optional[pulumi.Input[str]] = None, external_location: Optional[pulumi.Input[str]] = None, + foreign_connection: Optional[pulumi.Input[str]] = None, function: Optional[pulumi.Input[str]] = None, materialized_view: Optional[pulumi.Input[str]] = None, metastore: Optional[pulumi.Input[str]] = None, @@ -36,6 +37,8 @@ def __init__(__self__, *, pulumi.set(__self__, "catalog", catalog) if external_location is not None: pulumi.set(__self__, "external_location", external_location) + if foreign_connection is not None: + pulumi.set(__self__, "foreign_connection", foreign_connection) if function is not None: pulumi.set(__self__, "function", function) if materialized_view is not None: @@ -82,6 +85,15 @@ def external_location(self) -> Optional[pulumi.Input[str]]: def external_location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "external_location", value) + @property + @pulumi.getter(name="foreignConnection") + def foreign_connection(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "foreign_connection") + + @foreign_connection.setter + def foreign_connection(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "foreign_connection", value) + @property @pulumi.getter def function(self) -> Optional[pulumi.Input[str]]: @@ -169,6 +181,7 @@ class _GrantsState: def __init__(__self__, *, catalog: Optional[pulumi.Input[str]] = None, external_location: Optional[pulumi.Input[str]] = None, + foreign_connection: Optional[pulumi.Input[str]] = None, function: Optional[pulumi.Input[str]] = None, grants: Optional[pulumi.Input[Sequence[pulumi.Input['GrantsGrantArgs']]]] = None, materialized_view: Optional[pulumi.Input[str]] = None, @@ -186,6 +199,8 @@ def __init__(__self__, *, pulumi.set(__self__, "catalog", catalog) if external_location is not None: pulumi.set(__self__, "external_location", external_location) + if foreign_connection is not None: + pulumi.set(__self__, "foreign_connection", foreign_connection) if function is not None: pulumi.set(__self__, "function", function) if grants is not None: @@ -225,6 +240,15 @@ def external_location(self) -> Optional[pulumi.Input[str]]: def external_location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "external_location", value) + @property + @pulumi.getter(name="foreignConnection") + def foreign_connection(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "foreign_connection") + + @foreign_connection.setter + def foreign_connection(self, 
value: Optional[pulumi.Input[str]]): + pulumi.set(self, "foreign_connection", value) + @property @pulumi.getter def function(self) -> Optional[pulumi.Input[str]]: @@ -323,6 +347,7 @@ def __init__(__self__, opts: Optional[pulumi.ResourceOptions] = None, catalog: Optional[pulumi.Input[str]] = None, external_location: Optional[pulumi.Input[str]] = None, + foreign_connection: Optional[pulumi.Input[str]] = None, function: Optional[pulumi.Input[str]] = None, grants: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GrantsGrantArgs']]]]] = None, materialized_view: Optional[pulumi.Input[str]] = None, @@ -364,6 +389,7 @@ def _internal_init(__self__, opts: Optional[pulumi.ResourceOptions] = None, catalog: Optional[pulumi.Input[str]] = None, external_location: Optional[pulumi.Input[str]] = None, + foreign_connection: Optional[pulumi.Input[str]] = None, function: Optional[pulumi.Input[str]] = None, grants: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GrantsGrantArgs']]]]] = None, materialized_view: Optional[pulumi.Input[str]] = None, @@ -385,6 +411,7 @@ def _internal_init(__self__, __props__.__dict__["catalog"] = catalog __props__.__dict__["external_location"] = external_location + __props__.__dict__["foreign_connection"] = foreign_connection __props__.__dict__["function"] = function if grants is None and not opts.urn: raise TypeError("Missing required property 'grants'") @@ -409,6 +436,7 @@ def get(resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, catalog: Optional[pulumi.Input[str]] = None, external_location: Optional[pulumi.Input[str]] = None, + foreign_connection: Optional[pulumi.Input[str]] = None, function: Optional[pulumi.Input[str]] = None, grants: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GrantsGrantArgs']]]]] = None, materialized_view: Optional[pulumi.Input[str]] = None, @@ -433,6 +461,7 @@ def get(resource_name: str, __props__.__dict__["catalog"] = catalog __props__.__dict__["external_location"] = external_location + __props__.__dict__["foreign_connection"] = foreign_connection __props__.__dict__["function"] = function __props__.__dict__["grants"] = grants __props__.__dict__["materialized_view"] = materialized_view @@ -455,6 +484,11 @@ def catalog(self) -> pulumi.Output[Optional[str]]: def external_location(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "external_location") + @property + @pulumi.getter(name="foreignConnection") + def foreign_connection(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "foreign_connection") + @property @pulumi.getter def function(self) -> pulumi.Output[Optional[str]]: diff --git a/sdk/python/pulumi_databricks/metastore.py b/sdk/python/pulumi_databricks/metastore.py index a731cd09..fb021f63 100644 --- a/sdk/python/pulumi_databricks/metastore.py +++ b/sdk/python/pulumi_databricks/metastore.py @@ -24,9 +24,11 @@ def __init__(__self__, *, delta_sharing_scope: Optional[pulumi.Input[str]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, global_metastore_id: Optional[pulumi.Input[str]] = None, + metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, + storage_root_credential_id: Optional[pulumi.Input[str]] = None, updated_at: Optional[pulumi.Input[int]] = None, updated_by: Optional[pulumi.Input[str]] = None): """ @@ -38,6 +40,7 @@ def __init__(__self__, *, :param pulumi.Input[bool] force_destroy: Destroy metastore regardless of its 
contents. :param pulumi.Input[str] name: Name of metastore. :param pulumi.Input[str] owner: Username/groupname/sp application_id of the metastore owner. + :param pulumi.Input[str] region: The region of the metastore """ pulumi.set(__self__, "storage_root", storage_root) if cloud is not None: @@ -58,12 +61,16 @@ def __init__(__self__, *, pulumi.set(__self__, "force_destroy", force_destroy) if global_metastore_id is not None: pulumi.set(__self__, "global_metastore_id", global_metastore_id) + if metastore_id is not None: + pulumi.set(__self__, "metastore_id", metastore_id) if name is not None: pulumi.set(__self__, "name", name) if owner is not None: pulumi.set(__self__, "owner", owner) if region is not None: pulumi.set(__self__, "region", region) + if storage_root_credential_id is not None: + pulumi.set(__self__, "storage_root_credential_id", storage_root_credential_id) if updated_at is not None: pulumi.set(__self__, "updated_at", updated_at) if updated_by is not None: @@ -174,6 +181,15 @@ def global_metastore_id(self) -> Optional[pulumi.Input[str]]: def global_metastore_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "global_metastore_id", value) + @property + @pulumi.getter(name="metastoreId") + def metastore_id(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "metastore_id") + + @metastore_id.setter + def metastore_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "metastore_id", value) + @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: @@ -201,12 +217,24 @@ def owner(self, value: Optional[pulumi.Input[str]]): @property @pulumi.getter def region(self) -> Optional[pulumi.Input[str]]: + """ + The region of the metastore + """ return pulumi.get(self, "region") @region.setter def region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "region", value) + @property + @pulumi.getter(name="storageRootCredentialId") + def storage_root_credential_id(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "storage_root_credential_id") + + @storage_root_credential_id.setter + def storage_root_credential_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "storage_root_credential_id", value) + @property @pulumi.getter(name="updatedAt") def updated_at(self) -> Optional[pulumi.Input[int]]: @@ -238,10 +266,12 @@ def __init__(__self__, *, delta_sharing_scope: Optional[pulumi.Input[str]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, global_metastore_id: Optional[pulumi.Input[str]] = None, + metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, storage_root: Optional[pulumi.Input[str]] = None, + storage_root_credential_id: Optional[pulumi.Input[str]] = None, updated_at: Optional[pulumi.Input[int]] = None, updated_by: Optional[pulumi.Input[str]] = None): """ @@ -252,6 +282,7 @@ def __init__(__self__, *, :param pulumi.Input[bool] force_destroy: Destroy metastore regardless of its contents. :param pulumi.Input[str] name: Name of metastore. :param pulumi.Input[str] owner: Username/groupname/sp application_id of the metastore owner. + :param pulumi.Input[str] region: The region of the metastore :param pulumi.Input[str] storage_root: Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. 
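This upgrade also adds `metastore_id`, `region`, and `storage_root_credential_id` inputs to `databricks.Metastore`. A minimal sketch of how the new fields might be wired up, assuming `storage_root_credential_id` takes the ID of an existing storage credential; the bucket path, names, and the credential ID (supplied via config) are placeholders:

```python
import pulumi
import pulumi_databricks as databricks

config = pulumi.Config()
# Assumed to be the ID of an existing storage credential, supplied via config for this sketch.
root_credential_id = config.require("metastoreRootCredentialId")

this_metastore = databricks.Metastore(
    "thisMetastore",
    storage_root="s3://my-metastore-bucket/metastore",  # placeholder bucket
    region="us-east-1",                                 # new input: region of the metastore
    storage_root_credential_id=root_credential_id,      # new input added in this upgrade
    owner="uc admins",
    force_destroy=True,
)
```

Passing `region` explicitly matches the updated `MetastoreAssignment` examples later in this patch.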
""" if cloud is not None: @@ -272,6 +303,8 @@ def __init__(__self__, *, pulumi.set(__self__, "force_destroy", force_destroy) if global_metastore_id is not None: pulumi.set(__self__, "global_metastore_id", global_metastore_id) + if metastore_id is not None: + pulumi.set(__self__, "metastore_id", metastore_id) if name is not None: pulumi.set(__self__, "name", name) if owner is not None: @@ -280,6 +313,8 @@ def __init__(__self__, *, pulumi.set(__self__, "region", region) if storage_root is not None: pulumi.set(__self__, "storage_root", storage_root) + if storage_root_credential_id is not None: + pulumi.set(__self__, "storage_root_credential_id", storage_root_credential_id) if updated_at is not None: pulumi.set(__self__, "updated_at", updated_at) if updated_by is not None: @@ -378,6 +413,15 @@ def global_metastore_id(self) -> Optional[pulumi.Input[str]]: def global_metastore_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "global_metastore_id", value) + @property + @pulumi.getter(name="metastoreId") + def metastore_id(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "metastore_id") + + @metastore_id.setter + def metastore_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "metastore_id", value) + @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: @@ -405,6 +449,9 @@ def owner(self, value: Optional[pulumi.Input[str]]): @property @pulumi.getter def region(self) -> Optional[pulumi.Input[str]]: + """ + The region of the metastore + """ return pulumi.get(self, "region") @region.setter @@ -423,6 +470,15 @@ def storage_root(self) -> Optional[pulumi.Input[str]]: def storage_root(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "storage_root", value) + @property + @pulumi.getter(name="storageRootCredentialId") + def storage_root_credential_id(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "storage_root_credential_id") + + @storage_root_credential_id.setter + def storage_root_credential_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "storage_root_credential_id", value) + @property @pulumi.getter(name="updatedAt") def updated_at(self) -> Optional[pulumi.Input[int]]: @@ -456,17 +512,16 @@ def __init__(__self__, delta_sharing_scope: Optional[pulumi.Input[str]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, global_metastore_id: Optional[pulumi.Input[str]] = None, + metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, storage_root: Optional[pulumi.Input[str]] = None, + storage_root_credential_id: Optional[pulumi.Input[str]] = None, updated_at: Optional[pulumi.Input[int]] = None, updated_by: Optional[pulumi.Input[str]] = None, __props__=None): """ - > **Notes** - Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. - A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). @@ -487,6 +542,7 @@ def __init__(__self__, :param pulumi.Input[bool] force_destroy: Destroy metastore regardless of its contents. 
:param pulumi.Input[str] name: Name of metastore. :param pulumi.Input[str] owner: Username/groupname/sp application_id of the metastore owner. + :param pulumi.Input[str] region: The region of the metastore :param pulumi.Input[str] storage_root: Path on cloud storage account, where managed `Table` are stored. Change forces creation of a new resource. """ ... @@ -496,9 +552,6 @@ def __init__(__self__, args: MetastoreArgs, opts: Optional[pulumi.ResourceOptions] = None): """ - > **Notes** - Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. - A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. Unity Catalog offers a new metastore with built in security and auditing. This is distinct to the metastore used in previous versions of Databricks (based on the Hive Metastore). @@ -535,10 +588,12 @@ def _internal_init(__self__, delta_sharing_scope: Optional[pulumi.Input[str]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, global_metastore_id: Optional[pulumi.Input[str]] = None, + metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, storage_root: Optional[pulumi.Input[str]] = None, + storage_root_credential_id: Optional[pulumi.Input[str]] = None, updated_at: Optional[pulumi.Input[int]] = None, updated_by: Optional[pulumi.Input[str]] = None, __props__=None): @@ -559,12 +614,14 @@ def _internal_init(__self__, __props__.__dict__["delta_sharing_scope"] = delta_sharing_scope __props__.__dict__["force_destroy"] = force_destroy __props__.__dict__["global_metastore_id"] = global_metastore_id + __props__.__dict__["metastore_id"] = metastore_id __props__.__dict__["name"] = name __props__.__dict__["owner"] = owner __props__.__dict__["region"] = region if storage_root is None and not opts.urn: raise TypeError("Missing required property 'storage_root'") __props__.__dict__["storage_root"] = storage_root + __props__.__dict__["storage_root_credential_id"] = storage_root_credential_id __props__.__dict__["updated_at"] = updated_at __props__.__dict__["updated_by"] = updated_by super(Metastore, __self__).__init__( @@ -586,10 +643,12 @@ def get(resource_name: str, delta_sharing_scope: Optional[pulumi.Input[str]] = None, force_destroy: Optional[pulumi.Input[bool]] = None, global_metastore_id: Optional[pulumi.Input[str]] = None, + metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, owner: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, storage_root: Optional[pulumi.Input[str]] = None, + storage_root_credential_id: Optional[pulumi.Input[str]] = None, updated_at: Optional[pulumi.Input[int]] = None, updated_by: Optional[pulumi.Input[str]] = None) -> 'Metastore': """ @@ -605,6 +664,7 @@ def get(resource_name: str, :param pulumi.Input[bool] force_destroy: Destroy metastore regardless of its contents. :param pulumi.Input[str] name: Name of metastore. :param pulumi.Input[str] owner: Username/groupname/sp application_id of the metastore owner. + :param pulumi.Input[str] region: The region of the metastore :param pulumi.Input[str] storage_root: Path on cloud storage account, where managed `Table` are stored. 
Change forces creation of a new resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -620,10 +680,12 @@ def get(resource_name: str, __props__.__dict__["delta_sharing_scope"] = delta_sharing_scope __props__.__dict__["force_destroy"] = force_destroy __props__.__dict__["global_metastore_id"] = global_metastore_id + __props__.__dict__["metastore_id"] = metastore_id __props__.__dict__["name"] = name __props__.__dict__["owner"] = owner __props__.__dict__["region"] = region __props__.__dict__["storage_root"] = storage_root + __props__.__dict__["storage_root_credential_id"] = storage_root_credential_id __props__.__dict__["updated_at"] = updated_at __props__.__dict__["updated_by"] = updated_by return Metastore(resource_name, opts=opts, __props__=__props__) @@ -685,6 +747,11 @@ def force_destroy(self) -> pulumi.Output[Optional[bool]]: def global_metastore_id(self) -> pulumi.Output[str]: return pulumi.get(self, "global_metastore_id") + @property + @pulumi.getter(name="metastoreId") + def metastore_id(self) -> pulumi.Output[str]: + return pulumi.get(self, "metastore_id") + @property @pulumi.getter def name(self) -> pulumi.Output[str]: @@ -704,6 +771,9 @@ def owner(self) -> pulumi.Output[str]: @property @pulumi.getter def region(self) -> pulumi.Output[str]: + """ + The region of the metastore + """ return pulumi.get(self, "region") @property @@ -714,6 +784,11 @@ def storage_root(self) -> pulumi.Output[str]: """ return pulumi.get(self, "storage_root") + @property + @pulumi.getter(name="storageRootCredentialId") + def storage_root_credential_id(self) -> pulumi.Output[Optional[str]]: + return pulumi.get(self, "storage_root_credential_id") + @property @pulumi.getter(name="updatedAt") def updated_at(self) -> pulumi.Output[int]: diff --git a/sdk/python/pulumi_databricks/metastore_assignment.py b/sdk/python/pulumi_databricks/metastore_assignment.py index 8f11e724..8d6fa398 100644 --- a/sdk/python/pulumi_databricks/metastore_assignment.py +++ b/sdk/python/pulumi_databricks/metastore_assignment.py @@ -142,6 +142,7 @@ def __init__(__self__, this_metastore = databricks.Metastore("thisMetastore", storage_root=f"s3://{aws_s3_bucket['metastore']['id']}/metastore", owner="uc admins", + region="us-east-1", force_destroy=True) this_metastore_assignment = databricks.MetastoreAssignment("thisMetastoreAssignment", metastore_id=this_metastore.id, @@ -172,6 +173,7 @@ def __init__(__self__, this_metastore = databricks.Metastore("thisMetastore", storage_root=f"s3://{aws_s3_bucket['metastore']['id']}/metastore", owner="uc admins", + region="us-east-1", force_destroy=True) this_metastore_assignment = databricks.MetastoreAssignment("thisMetastoreAssignment", metastore_id=this_metastore.id, diff --git a/sdk/python/pulumi_databricks/mlflow_model.py b/sdk/python/pulumi_databricks/mlflow_model.py index 56fcd622..f27c4cb5 100644 --- a/sdk/python/pulumi_databricks/mlflow_model.py +++ b/sdk/python/pulumi_databricks/mlflow_model.py @@ -20,7 +20,6 @@ def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, last_updated_timestamp: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, - registered_model_id: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input['MlflowModelTagArgs']]]] = None, user_id: Optional[pulumi.Input[str]] = None): """ @@ -37,8 +36,6 @@ def __init__(__self__, *, pulumi.set(__self__, "last_updated_timestamp", last_updated_timestamp) if name is not None: pulumi.set(__self__, "name", name) - if 
registered_model_id is not None: - pulumi.set(__self__, "registered_model_id", registered_model_id) if tags is not None: pulumi.set(__self__, "tags", tags) if user_id is not None: @@ -86,15 +83,6 @@ def name(self) -> Optional[pulumi.Input[str]]: def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) - @property - @pulumi.getter(name="registeredModelId") - def registered_model_id(self) -> Optional[pulumi.Input[str]]: - return pulumi.get(self, "registered_model_id") - - @registered_model_id.setter - def registered_model_id(self, value: Optional[pulumi.Input[str]]): - pulumi.set(self, "registered_model_id", value) - @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MlflowModelTagArgs']]]]: @@ -124,7 +112,6 @@ def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, last_updated_timestamp: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, - registered_model_id: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input['MlflowModelTagArgs']]]] = None, user_id: Optional[pulumi.Input[str]] = None): """ @@ -141,8 +128,6 @@ def __init__(__self__, *, pulumi.set(__self__, "last_updated_timestamp", last_updated_timestamp) if name is not None: pulumi.set(__self__, "name", name) - if registered_model_id is not None: - pulumi.set(__self__, "registered_model_id", registered_model_id) if tags is not None: pulumi.set(__self__, "tags", tags) if user_id is not None: @@ -190,15 +175,6 @@ def name(self) -> Optional[pulumi.Input[str]]: def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) - @property - @pulumi.getter(name="registeredModelId") - def registered_model_id(self) -> Optional[pulumi.Input[str]]: - return pulumi.get(self, "registered_model_id") - - @registered_model_id.setter - def registered_model_id(self, value: Optional[pulumi.Input[str]]): - pulumi.set(self, "registered_model_id", value) - @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MlflowModelTagArgs']]]]: @@ -230,7 +206,6 @@ def __init__(__self__, description: Optional[pulumi.Input[str]] = None, last_updated_timestamp: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, - registered_model_id: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MlflowModelTagArgs']]]]] = None, user_id: Optional[pulumi.Input[str]] = None, __props__=None): @@ -357,7 +332,6 @@ def _internal_init(__self__, description: Optional[pulumi.Input[str]] = None, last_updated_timestamp: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, - registered_model_id: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MlflowModelTagArgs']]]]] = None, user_id: Optional[pulumi.Input[str]] = None, __props__=None): @@ -373,7 +347,6 @@ def _internal_init(__self__, __props__.__dict__["description"] = description __props__.__dict__["last_updated_timestamp"] = last_updated_timestamp __props__.__dict__["name"] = name - __props__.__dict__["registered_model_id"] = registered_model_id __props__.__dict__["tags"] = tags __props__.__dict__["user_id"] = user_id super(MlflowModel, __self__).__init__( @@ -390,7 +363,6 @@ def get(resource_name: str, description: Optional[pulumi.Input[str]] = None, last_updated_timestamp: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, - registered_model_id: 
Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MlflowModelTagArgs']]]]] = None, user_id: Optional[pulumi.Input[str]] = None) -> 'MlflowModel': """ @@ -412,14 +384,13 @@ def get(resource_name: str, __props__.__dict__["description"] = description __props__.__dict__["last_updated_timestamp"] = last_updated_timestamp __props__.__dict__["name"] = name - __props__.__dict__["registered_model_id"] = registered_model_id __props__.__dict__["tags"] = tags __props__.__dict__["user_id"] = user_id return MlflowModel(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="creationTimestamp") - def creation_timestamp(self) -> pulumi.Output[int]: + def creation_timestamp(self) -> pulumi.Output[Optional[int]]: return pulumi.get(self, "creation_timestamp") @property @@ -432,7 +403,7 @@ def description(self) -> pulumi.Output[Optional[str]]: @property @pulumi.getter(name="lastUpdatedTimestamp") - def last_updated_timestamp(self) -> pulumi.Output[int]: + def last_updated_timestamp(self) -> pulumi.Output[Optional[int]]: return pulumi.get(self, "last_updated_timestamp") @property @@ -443,11 +414,6 @@ def name(self) -> pulumi.Output[str]: """ return pulumi.get(self, "name") - @property - @pulumi.getter(name="registeredModelId") - def registered_model_id(self) -> pulumi.Output[str]: - return pulumi.get(self, "registered_model_id") - @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Sequence['outputs.MlflowModelTag']]]: @@ -458,6 +424,6 @@ def tags(self) -> pulumi.Output[Optional[Sequence['outputs.MlflowModelTag']]]: @property @pulumi.getter(name="userId") - def user_id(self) -> pulumi.Output[str]: + def user_id(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "user_id") diff --git a/sdk/python/pulumi_databricks/mws_customer_managed_keys.py b/sdk/python/pulumi_databricks/mws_customer_managed_keys.py index b1b716b1..e3ede4ca 100644 --- a/sdk/python/pulumi_databricks/mws_customer_managed_keys.py +++ b/sdk/python/pulumi_databricks/mws_customer_managed_keys.py @@ -26,9 +26,10 @@ def __init__(__self__, *, The set of arguments for constructing a MwsCustomerManagedKeys resource. :param pulumi.Input[str] account_id: Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/) :param pulumi.Input[Sequence[pulumi.Input[str]]] use_cases: *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: - :param pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs'] aws_key_info: This field is a block and is documented below. + :param pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs'] aws_key_info: This field is a block and is documented below. This conflicts with `gcp_key_info` :param pulumi.Input[int] creation_time: (Integer) Time in epoch milliseconds when the customer key was created. :param pulumi.Input[str] customer_managed_key_id: (String) ID of the encryption key configuration object. + :param pulumi.Input['MwsCustomerManagedKeysGcpKeyInfoArgs'] gcp_key_info: This field is a block and is documented below. 
This conflicts with `aws_key_info` """ pulumi.set(__self__, "account_id", account_id) pulumi.set(__self__, "use_cases", use_cases) @@ -69,7 +70,7 @@ def use_cases(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]): @pulumi.getter(name="awsKeyInfo") def aws_key_info(self) -> Optional[pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs']]: """ - This field is a block and is documented below. + This field is a block and is documented below. This conflicts with `gcp_key_info` """ return pulumi.get(self, "aws_key_info") @@ -104,6 +105,9 @@ def customer_managed_key_id(self, value: Optional[pulumi.Input[str]]): @property @pulumi.getter(name="gcpKeyInfo") def gcp_key_info(self) -> Optional[pulumi.Input['MwsCustomerManagedKeysGcpKeyInfoArgs']]: + """ + This field is a block and is documented below. This conflicts with `aws_key_info` + """ return pulumi.get(self, "gcp_key_info") @gcp_key_info.setter @@ -123,9 +127,10 @@ def __init__(__self__, *, """ Input properties used for looking up and filtering MwsCustomerManagedKeys resources. :param pulumi.Input[str] account_id: Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/) - :param pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs'] aws_key_info: This field is a block and is documented below. + :param pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs'] aws_key_info: This field is a block and is documented below. This conflicts with `gcp_key_info` :param pulumi.Input[int] creation_time: (Integer) Time in epoch milliseconds when the customer key was created. :param pulumi.Input[str] customer_managed_key_id: (String) ID of the encryption key configuration object. + :param pulumi.Input['MwsCustomerManagedKeysGcpKeyInfoArgs'] gcp_key_info: This field is a block and is documented below. This conflicts with `aws_key_info` :param pulumi.Input[Sequence[pulumi.Input[str]]] use_cases: *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: """ if account_id is not None: @@ -157,7 +162,7 @@ def account_id(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="awsKeyInfo") def aws_key_info(self) -> Optional[pulumi.Input['MwsCustomerManagedKeysAwsKeyInfoArgs']]: """ - This field is a block and is documented below. + This field is a block and is documented below. This conflicts with `gcp_key_info` """ return pulumi.get(self, "aws_key_info") @@ -192,6 +197,9 @@ def customer_managed_key_id(self, value: Optional[pulumi.Input[str]]): @property @pulumi.getter(name="gcpKeyInfo") def gcp_key_info(self) -> Optional[pulumi.Input['MwsCustomerManagedKeysGcpKeyInfoArgs']]: + """ + This field is a block and is documented below. This conflicts with `aws_key_info` + """ return pulumi.get(self, "gcp_key_info") @gcp_key_info.setter @@ -227,9 +235,11 @@ def __init__(__self__, ## Example Usage > **Note** If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour. 
+ ### Customer-managed key for managed services You must configure this during workspace creation + ### For AWS ```python import pulumi @@ -275,7 +285,24 @@ def __init__(__self__, ), use_cases=["MANAGED_SERVICES"]) ``` + ### For GCP + + ```python + import pulumi + import pulumi_databricks as databricks + + config = pulumi.Config() + databricks_account_id = config.require_object("databricksAccountId") + cmek_resource_id = config.require_object("cmekResourceId") + managed_services = databricks.MwsCustomerManagedKeys("managedServices", + account_id=databricks_account_id, + gcp_key_info=databricks.MwsCustomerManagedKeysGcpKeyInfoArgs( + kms_key_id=cmek_resource_id, + ), + use_cases=["MANAGED_SERVICES"]) + ``` ### Customer-managed key for workspace storage + ### For AWS ```python import pulumi @@ -363,6 +390,22 @@ def __init__(__self__, ), use_cases=["STORAGE"]) ``` + ### For GCP + + ```python + import pulumi + import pulumi_databricks as databricks + + config = pulumi.Config() + databricks_account_id = config.require_object("databricksAccountId") + cmek_resource_id = config.require_object("cmekResourceId") + storage = databricks.MwsCustomerManagedKeys("storage", + account_id=databricks_account_id, + gcp_key_info=databricks.MwsCustomerManagedKeysGcpKeyInfoArgs( + kms_key_id=cmek_resource_id, + ), + use_cases=["STORAGE"]) + ``` ## Related Resources The following resources are used in the same context: @@ -381,9 +424,10 @@ def __init__(__self__, :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] account_id: Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/) - :param pulumi.Input[pulumi.InputType['MwsCustomerManagedKeysAwsKeyInfoArgs']] aws_key_info: This field is a block and is documented below. + :param pulumi.Input[pulumi.InputType['MwsCustomerManagedKeysAwsKeyInfoArgs']] aws_key_info: This field is a block and is documented below. This conflicts with `gcp_key_info` :param pulumi.Input[int] creation_time: (Integer) Time in epoch milliseconds when the customer key was created. :param pulumi.Input[str] customer_managed_key_id: (String) ID of the encryption key configuration object. + :param pulumi.Input[pulumi.InputType['MwsCustomerManagedKeysGcpKeyInfoArgs']] gcp_key_info: This field is a block and is documented below. This conflicts with `aws_key_info` :param pulumi.Input[Sequence[pulumi.Input[str]]] use_cases: *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: """ ... @@ -396,9 +440,11 @@ def __init__(__self__, ## Example Usage > **Note** If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour. 
+ ### Customer-managed key for managed services You must configure this during workspace creation + ### For AWS ```python import pulumi @@ -444,7 +490,24 @@ def __init__(__self__, ), use_cases=["MANAGED_SERVICES"]) ``` + ### For GCP + + ```python + import pulumi + import pulumi_databricks as databricks + + config = pulumi.Config() + databricks_account_id = config.require_object("databricksAccountId") + cmek_resource_id = config.require_object("cmekResourceId") + managed_services = databricks.MwsCustomerManagedKeys("managedServices", + account_id=databricks_account_id, + gcp_key_info=databricks.MwsCustomerManagedKeysGcpKeyInfoArgs( + kms_key_id=cmek_resource_id, + ), + use_cases=["MANAGED_SERVICES"]) + ``` ### Customer-managed key for workspace storage + ### For AWS ```python import pulumi @@ -532,6 +595,22 @@ def __init__(__self__, ), use_cases=["STORAGE"]) ``` + ### For GCP + + ```python + import pulumi + import pulumi_databricks as databricks + + config = pulumi.Config() + databricks_account_id = config.require_object("databricksAccountId") + cmek_resource_id = config.require_object("cmekResourceId") + storage = databricks.MwsCustomerManagedKeys("storage", + account_id=databricks_account_id, + gcp_key_info=databricks.MwsCustomerManagedKeysGcpKeyInfoArgs( + kms_key_id=cmek_resource_id, + ), + use_cases=["STORAGE"]) + ``` ## Related Resources The following resources are used in the same context: @@ -611,9 +690,10 @@ def get(resource_name: str, :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] account_id: Account Id that could be found in the bottom left corner of [Accounts Console](https://accounts.cloud.databricks.com/) - :param pulumi.Input[pulumi.InputType['MwsCustomerManagedKeysAwsKeyInfoArgs']] aws_key_info: This field is a block and is documented below. + :param pulumi.Input[pulumi.InputType['MwsCustomerManagedKeysAwsKeyInfoArgs']] aws_key_info: This field is a block and is documented below. This conflicts with `gcp_key_info` :param pulumi.Input[int] creation_time: (Integer) Time in epoch milliseconds when the customer key was created. :param pulumi.Input[str] customer_managed_key_id: (String) ID of the encryption key configuration object. + :param pulumi.Input[pulumi.InputType['MwsCustomerManagedKeysGcpKeyInfoArgs']] gcp_key_info: This field is a block and is documented below. This conflicts with `aws_key_info` :param pulumi.Input[Sequence[pulumi.Input[str]]] use_cases: *(since v0.3.4)* List of use cases for which this key will be used. *If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.* Possible values are: """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -640,7 +720,7 @@ def account_id(self) -> pulumi.Output[str]: @pulumi.getter(name="awsKeyInfo") def aws_key_info(self) -> pulumi.Output[Optional['outputs.MwsCustomerManagedKeysAwsKeyInfo']]: """ - This field is a block and is documented below. + This field is a block and is documented below. This conflicts with `gcp_key_info` """ return pulumi.get(self, "aws_key_info") @@ -663,6 +743,9 @@ def customer_managed_key_id(self) -> pulumi.Output[str]: @property @pulumi.getter(name="gcpKeyInfo") def gcp_key_info(self) -> pulumi.Output[Optional['outputs.MwsCustomerManagedKeysGcpKeyInfo']]: + """ + This field is a block and is documented below. 
This conflicts with `aws_key_info` + """ return pulumi.get(self, "gcp_key_info") @property diff --git a/sdk/python/pulumi_databricks/mws_workspaces.py b/sdk/python/pulumi_databricks/mws_workspaces.py index e6a100db..31b2861d 100644 --- a/sdk/python/pulumi_databricks/mws_workspaces.py +++ b/sdk/python/pulumi_databricks/mws_workspaces.py @@ -55,6 +55,7 @@ def __init__(__self__, *, :param pulumi.Input[str] network_id: `network_id` from networks. :param pulumi.Input[str] private_access_settings_id: Canonical unique identifier of MwsPrivateAccessSettings in Databricks Account. :param pulumi.Input[str] storage_configuration_id: `storage_configuration_id` from storage configuration. + :param pulumi.Input[str] storage_customer_managed_key_id: `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. :param pulumi.Input[int] workspace_id: (String) workspace id :param pulumi.Input[str] workspace_status: (String) workspace status :param pulumi.Input[str] workspace_status_message: (String) updates on workspace status @@ -325,6 +326,9 @@ def storage_configuration_id(self, value: Optional[pulumi.Input[str]]): @property @pulumi.getter(name="storageCustomerManagedKeyId") def storage_customer_managed_key_id(self) -> Optional[pulumi.Input[str]]: + """ + `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + """ return pulumi.get(self, "storage_customer_managed_key_id") @storage_customer_managed_key_id.setter @@ -430,6 +434,7 @@ def __init__(__self__, *, :param pulumi.Input[str] network_id: `network_id` from networks. :param pulumi.Input[str] private_access_settings_id: Canonical unique identifier of MwsPrivateAccessSettings in Databricks Account. :param pulumi.Input[str] storage_configuration_id: `storage_configuration_id` from storage configuration. + :param pulumi.Input[str] storage_customer_managed_key_id: `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. :param pulumi.Input[int] workspace_id: (String) workspace id :param pulumi.Input[str] workspace_name: name of the workspace, will appear on UI. :param pulumi.Input[str] workspace_status: (String) workspace status @@ -691,6 +696,9 @@ def storage_configuration_id(self, value: Optional[pulumi.Input[str]]): @property @pulumi.getter(name="storageCustomerManagedKeyId") def storage_customer_managed_key_id(self) -> Optional[pulumi.Input[str]]: + """ + `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + """ return pulumi.get(self, "storage_customer_managed_key_id") @storage_customer_managed_key_id.setter @@ -816,6 +824,7 @@ def __init__(__self__, :param pulumi.Input[str] network_id: `network_id` from networks. :param pulumi.Input[str] private_access_settings_id: Canonical unique identifier of MwsPrivateAccessSettings in Databricks Account. :param pulumi.Input[str] storage_configuration_id: `storage_configuration_id` from storage configuration. + :param pulumi.Input[str] storage_customer_managed_key_id: `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. :param pulumi.Input[int] workspace_id: (String) workspace id :param pulumi.Input[str] workspace_name: name of the workspace, will appear on UI. 
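The `storage_customer_managed_key_id` documentation above ties `databricks.MwsWorkspaces` to a `databricks.MwsCustomerManagedKeys` entry created with `use_cases = ["STORAGE"]`. A sketch of that wiring, assuming the prerequisite MWS credentials and storage configuration already exist and are passed in via config; the KMS key, workspace name, and region are placeholders:

```python
import pulumi
import pulumi_databricks as databricks

config = pulumi.Config()
databricks_account_id = config.require("databricksAccountId")

# Key dedicated to workspace storage (DBFS root and cluster volumes).
storage_key = databricks.MwsCustomerManagedKeys(
    "storage",
    account_id=databricks_account_id,
    aws_key_info=databricks.MwsCustomerManagedKeysAwsKeyInfoArgs(
        key_arn=config.require("kmsKeyArn"),      # placeholder KMS key
        key_alias=config.require("kmsKeyAlias"),
    ),
    use_cases=["STORAGE"],
)

workspace = databricks.MwsWorkspaces(
    "thisWorkspace",
    account_id=databricks_account_id,
    workspace_name="my-workspace",                                       # placeholder
    aws_region="us-east-1",
    credentials_id=config.require("credentialsId"),                      # pre-existing MWS credentials
    storage_configuration_id=config.require("storageConfigurationId"),   # pre-existing storage configuration
    storage_customer_managed_key_id=storage_key.customer_managed_key_id,
)
```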
:param pulumi.Input[str] workspace_status: (String) workspace status @@ -969,6 +978,7 @@ def get(resource_name: str, :param pulumi.Input[str] network_id: `network_id` from networks. :param pulumi.Input[str] private_access_settings_id: Canonical unique identifier of MwsPrivateAccessSettings in Databricks Account. :param pulumi.Input[str] storage_configuration_id: `storage_configuration_id` from storage configuration. + :param pulumi.Input[str] storage_customer_managed_key_id: `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. :param pulumi.Input[int] workspace_id: (String) workspace id :param pulumi.Input[str] workspace_name: name of the workspace, will appear on UI. :param pulumi.Input[str] workspace_status: (String) workspace status @@ -1135,6 +1145,9 @@ def storage_configuration_id(self) -> pulumi.Output[Optional[str]]: @property @pulumi.getter(name="storageCustomerManagedKeyId") def storage_customer_managed_key_id(self) -> pulumi.Output[Optional[str]]: + """ + `customer_managed_key_id` from customer managed keys with `use_cases` set to `STORAGE`. This is used to encrypt the DBFS Storage & Cluster Volumes. + """ return pulumi.get(self, "storage_customer_managed_key_id") @property diff --git a/sdk/python/pulumi_databricks/outputs.py b/sdk/python/pulumi_databricks/outputs.py index 7a2fba70..fbe0e7ca 100644 --- a/sdk/python/pulumi_databricks/outputs.py +++ b/sdk/python/pulumi_databricks/outputs.py @@ -36,6 +36,8 @@ 'ClusterLibraryPypi', 'ClusterWorkloadType', 'ClusterWorkloadTypeClients', + 'ExternalLocationEncryptionDetails', + 'ExternalLocationEncryptionDetailsSseEncryptionDetails', 'GrantsGrant', 'InstancePoolAwsAttributes', 'InstancePoolAzureAttributes', @@ -444,9 +446,10 @@ def __init__(__self__, *, role: str, principals: Optional[Sequence[str]] = None): """ - :param str role: Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + :param str role: Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). * `roles/servicePrincipal.manager` - Manager of a service principal. * `roles/servicePrincipal.user` - User of a service principal. + * `roles/group.manager` - Manager of a group. :param Sequence[str] principals: a list of principals who are granted a role. The following format is supported: * `users/{username}` (also exposed as `acl_principal_id` attribute of `User` resource). * `groups/{groupname}` (also exposed as `acl_principal_id` attribute of `Group` resource). @@ -460,9 +463,10 @@ def __init__(__self__, *, @pulumi.getter def role(self) -> str: """ - Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles). + Role to be granted. The supported roles are listed below. 
For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles) or [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page). * `roles/servicePrincipal.manager` - Manager of a service principal. * `roles/servicePrincipal.user` - User of a service principal. + * `roles/group.manager` - Manager of a group. """ return pulumi.get(self, "role") @@ -1358,6 +1362,74 @@ def notebooks(self) -> Optional[bool]: return pulumi.get(self, "notebooks") +@pulumi.output_type +class ExternalLocationEncryptionDetails(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "sseEncryptionDetails": + suggest = "sse_encryption_details" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in ExternalLocationEncryptionDetails. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + ExternalLocationEncryptionDetails.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + ExternalLocationEncryptionDetails.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + sse_encryption_details: Optional['outputs.ExternalLocationEncryptionDetailsSseEncryptionDetails'] = None): + if sse_encryption_details is not None: + pulumi.set(__self__, "sse_encryption_details", sse_encryption_details) + + @property + @pulumi.getter(name="sseEncryptionDetails") + def sse_encryption_details(self) -> Optional['outputs.ExternalLocationEncryptionDetailsSseEncryptionDetails']: + return pulumi.get(self, "sse_encryption_details") + + +@pulumi.output_type +class ExternalLocationEncryptionDetailsSseEncryptionDetails(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "awsKmsKeyArn": + suggest = "aws_kms_key_arn" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in ExternalLocationEncryptionDetailsSseEncryptionDetails. 
Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + ExternalLocationEncryptionDetailsSseEncryptionDetails.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + ExternalLocationEncryptionDetailsSseEncryptionDetails.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + algorithm: Optional[str] = None, + aws_kms_key_arn: Optional[str] = None): + if algorithm is not None: + pulumi.set(__self__, "algorithm", algorithm) + if aws_kms_key_arn is not None: + pulumi.set(__self__, "aws_kms_key_arn", aws_kms_key_arn) + + @property + @pulumi.getter + def algorithm(self) -> Optional[str]: + return pulumi.get(self, "algorithm") + + @property + @pulumi.getter(name="awsKmsKeyArn") + def aws_kms_key_arn(self) -> Optional[str]: + return pulumi.get(self, "aws_kms_key_arn") + + @pulumi.output_type class GrantsGrant(dict): def __init__(__self__, *, @@ -5238,10 +5310,10 @@ def get(self, key: str, default = None) -> Any: return super().get(key, default) def __init__(__self__, *, - job_id: str, + job_id: int, job_parameters: Optional[Mapping[str, Any]] = None): """ - :param str job_id: (String) ID of the job + :param int job_id: (String) ID of the job :param Mapping[str, Any] job_parameters: (Map) Job parameters for the task """ pulumi.set(__self__, "job_id", job_id) @@ -5250,7 +5322,7 @@ def __init__(__self__, *, @property @pulumi.getter(name="jobId") - def job_id(self) -> str: + def job_id(self) -> int: """ (String) ID of the job """ @@ -7709,10 +7781,10 @@ def get(self, key: str, default = None) -> Any: return super().get(key, default) def __init__(__self__, *, - job_id: str, + job_id: int, job_parameters: Optional[Mapping[str, Any]] = None): """ - :param str job_id: (String) ID of the job + :param int job_id: (String) ID of the job :param Mapping[str, Any] job_parameters: (Map) Job parameters for the task """ pulumi.set(__self__, "job_id", job_id) @@ -7721,7 +7793,7 @@ def __init__(__self__, *, @property @pulumi.getter(name="jobId") - def job_id(self) -> str: + def job_id(self) -> int: """ (String) ID of the job """ @@ -8706,7 +8778,7 @@ def __init__(__self__, *, """ :param str role_arn: The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` - `azure_service_principal` optional configuration block for credential details for Azure: + `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): """ pulumi.set(__self__, "role_arn", role_arn) @@ -8716,7 +8788,7 @@ def role_arn(self) -> str: """ The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` - `azure_service_principal` optional configuration block for credential details for Azure: + `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure (Recommended): """ return pulumi.get(self, "role_arn") @@ -8790,8 +8862,6 @@ def __init__(__self__, *, """ :param str application_id: The application ID of the application registration within the referenced AAD tenant :param str client_secret: The client secret generated for the above app ID in AAD. 
**This field is redacted on output** - - `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: :param str directory_id: The directory ID corresponding to the Azure Active Directory (AAD) tenant of the application """ pulumi.set(__self__, "application_id", application_id) @@ -8811,8 +8881,6 @@ def application_id(self) -> str: def client_secret(self) -> str: """ The client secret generated for the above app ID in AAD. **This field is redacted on output** - - `azure_managed_identity` optional configuration block for using managed identity as credential details for Azure: """ return pulumi.get(self, "client_secret") @@ -8831,6 +8899,8 @@ def __init__(__self__, *, email: Optional[str] = None): """ :param str email: The email of the GCP service account created, to be granted access to relevant buckets. + + `azure_service_principal` optional configuration block for credential details for Azure (Legacy): """ if email is not None: pulumi.set(__self__, "email", email) @@ -8840,6 +8910,8 @@ def __init__(__self__, *, def email(self) -> Optional[str]: """ The email of the GCP service account created, to be granted access to relevant buckets. + + `azure_service_principal` optional configuration block for credential details for Azure (Legacy): """ return pulumi.get(self, "email") @@ -8871,6 +8943,8 @@ def __init__(__self__, *, private_key_id: str): """ :param str email: The email of the GCP service account created, to be granted access to relevant buckets. + + `azure_service_principal` optional configuration block for credential details for Azure (Legacy): """ pulumi.set(__self__, "email", email) pulumi.set(__self__, "private_key", private_key) @@ -8881,6 +8955,8 @@ def __init__(__self__, *, def email(self) -> str: """ The email of the GCP service account created, to be granted access to relevant buckets. + + `azure_service_principal` optional configuration block for credential details for Azure (Legacy): """ return pulumi.get(self, "email") @@ -8898,19 +8974,21 @@ def private_key_id(self) -> str: @pulumi.output_type class MlflowModelTag(dict): def __init__(__self__, *, - key: str, - value: str): - pulumi.set(__self__, "key", key) - pulumi.set(__self__, "value", value) + key: Optional[str] = None, + value: Optional[str] = None): + if key is not None: + pulumi.set(__self__, "key", key) + if value is not None: + pulumi.set(__self__, "value", value) @property @pulumi.getter - def key(self) -> str: + def key(self) -> Optional[str]: return pulumi.get(self, "key") @property @pulumi.getter - def value(self) -> str: + def value(self) -> Optional[str]: return pulumi.get(self, "value") @@ -9137,6 +9215,8 @@ def __init__(__self__, *, :param str model_name: The name of the model in Databricks Model Registry to be served. :param str model_version: The version of the model in Databricks Model Registry to be served. :param str workload_size: The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). + :param Mapping[str, Any] environment_vars: a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. 
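The newly documented `environment_vars` and `instance_profile_arn` fields apply to served models on a `databricks.ModelServing` endpoint. A sketch of a served model that injects a secret via the `{{secrets/secret_scope/secret_key}}` syntax described above; the endpoint name, model, secret scope/key, and instance profile ARN are all placeholders:

```python
import pulumi_databricks as databricks

endpoint = databricks.ModelServing(
    "thisEndpoint",
    name="my-endpoint",  # placeholder endpoint name
    config=databricks.ModelServingConfigArgs(
        served_models=[
            databricks.ModelServingConfigServedModelArgs(
                model_name="my-registry-model",  # placeholder Model Registry model
                model_version="1",
                workload_size="Small",
                scale_to_zero_enabled=True,
                # Environment variables may reference secrets with the documented syntax.
                environment_vars={
                    "EXAMPLE_API_KEY": "{{secrets/example_scope/example_key}}",  # placeholder scope/key
                },
                instance_profile_arn="arn:aws:iam::1234567890:instance-profile/my-profile",  # placeholder
            ),
        ],
    ),
)
```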
+ :param str instance_profile_arn: ARN of the instance profile that the served model will use to access AWS resources. :param str name: The name of a served model. It must be unique across an endpoint. If not specified, this field will default to `modelname-modelversion`. A served model name can consist of alphanumeric characters, dashes, and underscores. :param bool scale_to_zero_enabled: Whether the compute resources for the served model should scale down to zero. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0. The default value is `true`. """ @@ -9179,11 +9259,17 @@ def workload_size(self) -> str: @property @pulumi.getter(name="environmentVars") def environment_vars(self) -> Optional[Mapping[str, Any]]: + """ + a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. + """ return pulumi.get(self, "environment_vars") @property @pulumi.getter(name="instanceProfileArn") def instance_profile_arn(self) -> Optional[str]: + """ + ARN of the instance profile that the served model will use to access AWS resources. + """ return pulumi.get(self, "instance_profile_arn") @property @@ -9685,11 +9771,17 @@ def get(self, key: str, default = None) -> Any: def __init__(__self__, *, kms_key_id: str): + """ + :param str kms_key_id: The GCP KMS key's resource name. + """ pulumi.set(__self__, "kms_key_id", kms_key_id) @property @pulumi.getter(name="kmsKeyId") def kms_key_id(self) -> str: + """ + The GCP KMS key's resource name. + """ return pulumi.get(self, "kms_key_id") @@ -10183,6 +10275,9 @@ def __init__(__self__, *, lifetime_seconds: Optional[int] = None, token_id: Optional[str] = None, token_value: Optional[str] = None): + """ + :param int lifetime_seconds: Token expiry lifetime. By default its 2592000 (30 days). + """ if comment is not None: pulumi.set(__self__, "comment", comment) if lifetime_seconds is not None: @@ -10200,6 +10295,9 @@ def comment(self) -> Optional[str]: @property @pulumi.getter(name="lifetimeSeconds") def lifetime_seconds(self) -> Optional[int]: + """ + Token expiry lifetime. By default its 2592000 (30 days). + """ return pulumi.get(self, "lifetime_seconds") @property @@ -12509,21 +12607,22 @@ def until_date(self) -> Optional[str]: class SqlTableColumn(dict): def __init__(__self__, *, name: str, - type: str, comment: Optional[str] = None, - nullable: Optional[bool] = None): + nullable: Optional[bool] = None, + type: Optional[str] = None): """ :param str name: User-visible name of column - :param str type: Column type spec (with metadata) as SQL text :param str comment: User-supplied free-form text. :param bool nullable: Whether field is nullable (Default: `true`) + :param str type: Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. 
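With `type` now optional on `SqlTableColumn` (and unsupported for `VIEW` table types), a column list might look like the following sketch; the catalog, schema, table name, and the cluster used to run the DDL are placeholders:

```python
import pulumi
import pulumi_databricks as databricks

config = pulumi.Config()

table = databricks.SqlTable(
    "thisTable",
    name="quickstart_table",
    catalog_name="main",                     # placeholder catalog/schema
    schema_name="default",
    table_type="MANAGED",
    data_source_format="DELTA",
    cluster_id=config.require("clusterId"),  # cluster that executes the DDL (placeholder)
    columns=[
        databricks.SqlTableColumnArgs(name="id", type="bigint"),
        databricks.SqlTableColumnArgs(name="payload", type="string", nullable=True),
        # For table_type="VIEW", `type` would now be omitted entirely.
    ],
)
```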
""" pulumi.set(__self__, "name", name) - pulumi.set(__self__, "type", type) if comment is not None: pulumi.set(__self__, "comment", comment) if nullable is not None: pulumi.set(__self__, "nullable", nullable) + if type is not None: + pulumi.set(__self__, "type", type) @property @pulumi.getter @@ -12533,14 +12632,6 @@ def name(self) -> str: """ return pulumi.get(self, "name") - @property - @pulumi.getter - def type(self) -> str: - """ - Column type spec (with metadata) as SQL text - """ - return pulumi.get(self, "type") - @property @pulumi.getter def comment(self) -> Optional[str]: @@ -12557,6 +12648,14 @@ def nullable(self) -> Optional[bool]: """ return pulumi.get(self, "nullable") + @property + @pulumi.getter + def type(self) -> Optional[str]: + """ + Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. + """ + return pulumi.get(self, "type") + @pulumi.output_type class SqlWidgetParameter(dict): @@ -17070,7 +17169,7 @@ def user_name(self) -> Optional[str]: @pulumi.output_type class GetJobJobSettingsSettingsRunJobTaskResult(dict): def __init__(__self__, *, - job_id: str, + job_id: int, job_parameters: Optional[Mapping[str, Any]] = None): pulumi.set(__self__, "job_id", job_id) if job_parameters is not None: @@ -17078,7 +17177,7 @@ def __init__(__self__, *, @property @pulumi.getter(name="jobId") - def job_id(self) -> str: + def job_id(self) -> int: return pulumi.get(self, "job_id") @property @@ -18627,7 +18726,7 @@ def parameters(self) -> Optional[Sequence[str]]: @pulumi.output_type class GetJobJobSettingsSettingsTaskRunJobTaskResult(dict): def __init__(__self__, *, - job_id: str, + job_id: int, job_parameters: Optional[Mapping[str, Any]] = None): pulumi.set(__self__, "job_id", job_id) if job_parameters is not None: @@ -18635,7 +18734,7 @@ def __init__(__self__, *, @property @pulumi.getter(name="jobId") - def job_id(self) -> str: + def job_id(self) -> int: return pulumi.get(self, "job_id") @property diff --git a/sdk/python/pulumi_databricks/share.py b/sdk/python/pulumi_databricks/share.py index 707ec977..8e9b4bf5 100644 --- a/sdk/python/pulumi_databricks/share.py +++ b/sdk/python/pulumi_databricks/share.py @@ -19,12 +19,14 @@ def __init__(__self__, *, created_at: Optional[pulumi.Input[int]] = None, created_by: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, - objects: Optional[pulumi.Input[Sequence[pulumi.Input['ShareObjectArgs']]]] = None): + objects: Optional[pulumi.Input[Sequence[pulumi.Input['ShareObjectArgs']]]] = None, + owner: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a Share resource. :param pulumi.Input[int] created_at: Time when the share was created. :param pulumi.Input[str] created_by: The principal that created the share. :param pulumi.Input[str] name: Name of share. Change forces creation of a new resource. + :param pulumi.Input[str] owner: User name/group name/sp application_id of the share owner. 
""" if created_at is not None: pulumi.set(__self__, "created_at", created_at) @@ -34,6 +36,8 @@ def __init__(__self__, *, pulumi.set(__self__, "name", name) if objects is not None: pulumi.set(__self__, "objects", objects) + if owner is not None: + pulumi.set(__self__, "owner", owner) @property @pulumi.getter(name="createdAt") @@ -80,6 +84,18 @@ def objects(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ShareObjectArg def objects(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ShareObjectArgs']]]]): pulumi.set(self, "objects", value) + @property + @pulumi.getter + def owner(self) -> Optional[pulumi.Input[str]]: + """ + User name/group name/sp application_id of the share owner. + """ + return pulumi.get(self, "owner") + + @owner.setter + def owner(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "owner", value) + @pulumi.input_type class _ShareState: @@ -87,12 +103,14 @@ def __init__(__self__, *, created_at: Optional[pulumi.Input[int]] = None, created_by: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, - objects: Optional[pulumi.Input[Sequence[pulumi.Input['ShareObjectArgs']]]] = None): + objects: Optional[pulumi.Input[Sequence[pulumi.Input['ShareObjectArgs']]]] = None, + owner: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering Share resources. :param pulumi.Input[int] created_at: Time when the share was created. :param pulumi.Input[str] created_by: The principal that created the share. :param pulumi.Input[str] name: Name of share. Change forces creation of a new resource. + :param pulumi.Input[str] owner: User name/group name/sp application_id of the share owner. """ if created_at is not None: pulumi.set(__self__, "created_at", created_at) @@ -102,6 +120,8 @@ def __init__(__self__, *, pulumi.set(__self__, "name", name) if objects is not None: pulumi.set(__self__, "objects", objects) + if owner is not None: + pulumi.set(__self__, "owner", owner) @property @pulumi.getter(name="createdAt") @@ -148,6 +168,18 @@ def objects(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ShareObjectArg def objects(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ShareObjectArgs']]]]): pulumi.set(self, "objects", value) + @property + @pulumi.getter + def owner(self) -> Optional[pulumi.Input[str]]: + """ + User name/group name/sp application_id of the share owner. + """ + return pulumi.get(self, "owner") + + @owner.setter + def owner(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "owner", value) + class Share(pulumi.CustomResource): @overload @@ -158,6 +190,7 @@ def __init__(__self__, created_by: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, objects: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ShareObjectArgs']]]]] = None, + owner: Optional[pulumi.Input[str]] = None, __props__=None): """ Create a Share resource with the given unique name, props, and options. @@ -166,6 +199,7 @@ def __init__(__self__, :param pulumi.Input[int] created_at: Time when the share was created. :param pulumi.Input[str] created_by: The principal that created the share. :param pulumi.Input[str] name: Name of share. Change forces creation of a new resource. + :param pulumi.Input[str] owner: User name/group name/sp application_id of the share owner. """ ... 
@overload @@ -194,6 +228,7 @@ def _internal_init(__self__, created_by: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, objects: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ShareObjectArgs']]]]] = None, + owner: Optional[pulumi.Input[str]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): @@ -207,6 +242,7 @@ def _internal_init(__self__, __props__.__dict__["created_by"] = created_by __props__.__dict__["name"] = name __props__.__dict__["objects"] = objects + __props__.__dict__["owner"] = owner super(Share, __self__).__init__( 'databricks:index/share:Share', resource_name, @@ -220,7 +256,8 @@ def get(resource_name: str, created_at: Optional[pulumi.Input[int]] = None, created_by: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, - objects: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ShareObjectArgs']]]]] = None) -> 'Share': + objects: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ShareObjectArgs']]]]] = None, + owner: Optional[pulumi.Input[str]] = None) -> 'Share': """ Get an existing Share resource's state with the given name, id, and optional extra properties used to qualify the lookup. @@ -231,6 +268,7 @@ def get(resource_name: str, :param pulumi.Input[int] created_at: Time when the share was created. :param pulumi.Input[str] created_by: The principal that created the share. :param pulumi.Input[str] name: Name of share. Change forces creation of a new resource. + :param pulumi.Input[str] owner: User name/group name/sp application_id of the share owner. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -240,6 +278,7 @@ def get(resource_name: str, __props__.__dict__["created_by"] = created_by __props__.__dict__["name"] = name __props__.__dict__["objects"] = objects + __props__.__dict__["owner"] = owner return Share(resource_name, opts=opts, __props__=__props__) @property @@ -271,3 +310,11 @@ def name(self) -> pulumi.Output[str]: def objects(self) -> pulumi.Output[Optional[Sequence['outputs.ShareObject']]]: return pulumi.get(self, "objects") + @property + @pulumi.getter + def owner(self) -> pulumi.Output[Optional[str]]: + """ + User name/group name/sp application_id of the share owner. + """ + return pulumi.get(self, "owner") + diff --git a/sdk/python/pulumi_databricks/sql_alert.py b/sdk/python/pulumi_databricks/sql_alert.py index 4c7f3063..15ed2fe5 100644 --- a/sdk/python/pulumi_databricks/sql_alert.py +++ b/sdk/python/pulumi_databricks/sql_alert.py @@ -18,9 +18,11 @@ class SqlAlertArgs: def __init__(__self__, *, options: pulumi.Input['SqlAlertOptionsArgs'], query_id: pulumi.Input[str], + created_at: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, parent: Optional[pulumi.Input[str]] = None, - rearm: Optional[pulumi.Input[int]] = None): + rearm: Optional[pulumi.Input[int]] = None, + updated_at: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a SqlAlert resource. :param pulumi.Input['SqlAlertOptionsArgs'] options: Alert configuration options. 
@@ -31,12 +33,16 @@ def __init__(__self__, *, """ pulumi.set(__self__, "options", options) pulumi.set(__self__, "query_id", query_id) + if created_at is not None: + pulumi.set(__self__, "created_at", created_at) if name is not None: pulumi.set(__self__, "name", name) if parent is not None: pulumi.set(__self__, "parent", parent) if rearm is not None: pulumi.set(__self__, "rearm", rearm) + if updated_at is not None: + pulumi.set(__self__, "updated_at", updated_at) @property @pulumi.getter @@ -62,6 +68,15 @@ def query_id(self) -> pulumi.Input[str]: def query_id(self, value: pulumi.Input[str]): pulumi.set(self, "query_id", value) + @property + @pulumi.getter(name="createdAt") + def created_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "created_at") + + @created_at.setter + def created_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "created_at", value) + @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: @@ -98,15 +113,26 @@ def rearm(self) -> Optional[pulumi.Input[int]]: def rearm(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "rearm", value) + @property + @pulumi.getter(name="updatedAt") + def updated_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "updated_at") + + @updated_at.setter + def updated_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "updated_at", value) + @pulumi.input_type class _SqlAlertState: def __init__(__self__, *, + created_at: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, options: Optional[pulumi.Input['SqlAlertOptionsArgs']] = None, parent: Optional[pulumi.Input[str]] = None, query_id: Optional[pulumi.Input[str]] = None, - rearm: Optional[pulumi.Input[int]] = None): + rearm: Optional[pulumi.Input[int]] = None, + updated_at: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering SqlAlert resources. :param pulumi.Input[str] name: Name of the alert. @@ -115,6 +141,8 @@ def __init__(__self__, *, :param pulumi.Input[str] query_id: ID of the query evaluated by the alert. :param pulumi.Input[int] rearm: Number of seconds after being triggered before the alert rearms itself and can be triggered again. If not defined, alert will never be triggered again. 
""" + if created_at is not None: + pulumi.set(__self__, "created_at", created_at) if name is not None: pulumi.set(__self__, "name", name) if options is not None: @@ -125,6 +153,17 @@ def __init__(__self__, *, pulumi.set(__self__, "query_id", query_id) if rearm is not None: pulumi.set(__self__, "rearm", rearm) + if updated_at is not None: + pulumi.set(__self__, "updated_at", updated_at) + + @property + @pulumi.getter(name="createdAt") + def created_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "created_at") + + @created_at.setter + def created_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "created_at", value) @property @pulumi.getter @@ -186,17 +225,28 @@ def rearm(self) -> Optional[pulumi.Input[int]]: def rearm(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "rearm", value) + @property + @pulumi.getter(name="updatedAt") + def updated_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "updated_at") + + @updated_at.setter + def updated_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "updated_at", value) + class SqlAlert(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, + created_at: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, options: Optional[pulumi.Input[pulumi.InputType['SqlAlertOptionsArgs']]] = None, parent: Optional[pulumi.Input[str]] = None, query_id: Optional[pulumi.Input[str]] = None, rearm: Optional[pulumi.Input[int]] = None, + updated_at: Optional[pulumi.Input[str]] = None, __props__=None): """ This resource allows you to manage [Databricks SQL Alerts](https://docs.databricks.com/sql/user/queries/index.html). @@ -255,11 +305,13 @@ def __init__(__self__, resource_name: str, *args, **kwargs): def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, + created_at: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, options: Optional[pulumi.Input[pulumi.InputType['SqlAlertOptionsArgs']]] = None, parent: Optional[pulumi.Input[str]] = None, query_id: Optional[pulumi.Input[str]] = None, rearm: Optional[pulumi.Input[int]] = None, + updated_at: Optional[pulumi.Input[str]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): @@ -269,6 +321,7 @@ def _internal_init(__self__, raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = SqlAlertArgs.__new__(SqlAlertArgs) + __props__.__dict__["created_at"] = created_at __props__.__dict__["name"] = name if options is None and not opts.urn: raise TypeError("Missing required property 'options'") @@ -278,6 +331,7 @@ def _internal_init(__self__, raise TypeError("Missing required property 'query_id'") __props__.__dict__["query_id"] = query_id __props__.__dict__["rearm"] = rearm + __props__.__dict__["updated_at"] = updated_at super(SqlAlert, __self__).__init__( 'databricks:index/sqlAlert:SqlAlert', resource_name, @@ -288,11 +342,13 @@ def _internal_init(__self__, def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, + created_at: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, options: Optional[pulumi.Input[pulumi.InputType['SqlAlertOptionsArgs']]] = None, parent: Optional[pulumi.Input[str]] = None, query_id: Optional[pulumi.Input[str]] = None, - rearm: 
Optional[pulumi.Input[int]] = None) -> 'SqlAlert': + rearm: Optional[pulumi.Input[int]] = None, + updated_at: Optional[pulumi.Input[str]] = None) -> 'SqlAlert': """ Get an existing SqlAlert resource's state with the given name, id, and optional extra properties used to qualify the lookup. @@ -310,13 +366,20 @@ def get(resource_name: str, __props__ = _SqlAlertState.__new__(_SqlAlertState) + __props__.__dict__["created_at"] = created_at __props__.__dict__["name"] = name __props__.__dict__["options"] = options __props__.__dict__["parent"] = parent __props__.__dict__["query_id"] = query_id __props__.__dict__["rearm"] = rearm + __props__.__dict__["updated_at"] = updated_at return SqlAlert(resource_name, opts=opts, __props__=__props__) + @property + @pulumi.getter(name="createdAt") + def created_at(self) -> pulumi.Output[str]: + return pulumi.get(self, "created_at") + @property @pulumi.getter def name(self) -> pulumi.Output[str]: @@ -357,3 +420,8 @@ def rearm(self) -> pulumi.Output[Optional[int]]: """ return pulumi.get(self, "rearm") + @property + @pulumi.getter(name="updatedAt") + def updated_at(self) -> pulumi.Output[str]: + return pulumi.get(self, "updated_at") + diff --git a/sdk/python/pulumi_databricks/sql_dashboard.py b/sdk/python/pulumi_databricks/sql_dashboard.py index 80a58a64..1fa9f220 100644 --- a/sdk/python/pulumi_databricks/sql_dashboard.py +++ b/sdk/python/pulumi_databricks/sql_dashboard.py @@ -14,18 +14,33 @@ @pulumi.input_type class SqlDashboardArgs: def __init__(__self__, *, + created_at: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, parent: Optional[pulumi.Input[str]] = None, - tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): + tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + updated_at: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a SqlDashboard resource. """ + if created_at is not None: + pulumi.set(__self__, "created_at", created_at) if name is not None: pulumi.set(__self__, "name", name) if parent is not None: pulumi.set(__self__, "parent", parent) if tags is not None: pulumi.set(__self__, "tags", tags) + if updated_at is not None: + pulumi.set(__self__, "updated_at", updated_at) + + @property + @pulumi.getter(name="createdAt") + def created_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "created_at") + + @created_at.setter + def created_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "created_at", value) @property @pulumi.getter @@ -54,22 +69,46 @@ def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "tags", value) + @property + @pulumi.getter(name="updatedAt") + def updated_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "updated_at") + + @updated_at.setter + def updated_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "updated_at", value) + @pulumi.input_type class _SqlDashboardState: def __init__(__self__, *, + created_at: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, parent: Optional[pulumi.Input[str]] = None, - tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): + tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + updated_at: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering SqlDashboard resources. 
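Illustrative only: the `created_at` and `updated_at` fields added to `databricks.SqlAlert` above surface as read-only outputs. The query ID, alert column, and threshold below are placeholders:

import pulumi
import pulumi_databricks as databricks

alert = databricks.SqlAlert(
    "cost-alert",
    query_id="00000000-0000-0000-0000-000000000000",  # placeholder ID of an existing SQL query
    options=databricks.SqlAlertOptionsArgs(
        column="total_cost",  # placeholder column in the query result
        op=">",
        value="100",
    ),
    rearm=3600,  # wait an hour before the alert can trigger again
)

# The new server-maintained timestamps can be exported or consumed elsewhere.
pulumi.export("alertCreatedAt", alert.created_at)
pulumi.export("alertUpdatedAt", alert.updated_at)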
""" + if created_at is not None: + pulumi.set(__self__, "created_at", created_at) if name is not None: pulumi.set(__self__, "name", name) if parent is not None: pulumi.set(__self__, "parent", parent) if tags is not None: pulumi.set(__self__, "tags", tags) + if updated_at is not None: + pulumi.set(__self__, "updated_at", updated_at) + + @property + @pulumi.getter(name="createdAt") + def created_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "created_at") + + @created_at.setter + def created_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "created_at", value) @property @pulumi.getter @@ -98,15 +137,26 @@ def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "tags", value) + @property + @pulumi.getter(name="updatedAt") + def updated_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "updated_at") + + @updated_at.setter + def updated_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "updated_at", value) + class SqlDashboard(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, + created_at: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, parent: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + updated_at: Optional[pulumi.Input[str]] = None, __props__=None): """ This resource is used to manage [Databricks SQL Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your Group or databricks_user. @@ -236,9 +286,11 @@ def __init__(__self__, resource_name: str, *args, **kwargs): def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, + created_at: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, parent: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + updated_at: Optional[pulumi.Input[str]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): @@ -248,9 +300,11 @@ def _internal_init(__self__, raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = SqlDashboardArgs.__new__(SqlDashboardArgs) + __props__.__dict__["created_at"] = created_at __props__.__dict__["name"] = name __props__.__dict__["parent"] = parent __props__.__dict__["tags"] = tags + __props__.__dict__["updated_at"] = updated_at super(SqlDashboard, __self__).__init__( 'databricks:index/sqlDashboard:SqlDashboard', resource_name, @@ -261,9 +315,11 @@ def _internal_init(__self__, def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, + created_at: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, parent: Optional[pulumi.Input[str]] = None, - tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'SqlDashboard': + tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + updated_at: Optional[pulumi.Input[str]] = None) -> 'SqlDashboard': """ Get an existing SqlDashboard resource's state with the given name, id, and optional extra properties used to qualify the lookup. 
@@ -276,11 +332,18 @@ def get(resource_name: str, __props__ = _SqlDashboardState.__new__(_SqlDashboardState) + __props__.__dict__["created_at"] = created_at __props__.__dict__["name"] = name __props__.__dict__["parent"] = parent __props__.__dict__["tags"] = tags + __props__.__dict__["updated_at"] = updated_at return SqlDashboard(resource_name, opts=opts, __props__=__props__) + @property + @pulumi.getter(name="createdAt") + def created_at(self) -> pulumi.Output[str]: + return pulumi.get(self, "created_at") + @property @pulumi.getter def name(self) -> pulumi.Output[str]: @@ -296,3 +359,8 @@ def parent(self) -> pulumi.Output[Optional[str]]: def tags(self) -> pulumi.Output[Optional[Sequence[str]]]: return pulumi.get(self, "tags") + @property + @pulumi.getter(name="updatedAt") + def updated_at(self) -> pulumi.Output[str]: + return pulumi.get(self, "updated_at") + diff --git a/sdk/python/pulumi_databricks/sql_query.py b/sdk/python/pulumi_databricks/sql_query.py index b37b40ef..4937bd30 100644 --- a/sdk/python/pulumi_databricks/sql_query.py +++ b/sdk/python/pulumi_databricks/sql_query.py @@ -18,18 +18,22 @@ class SqlQueryArgs: def __init__(__self__, *, data_source_id: pulumi.Input[str], query: pulumi.Input[str], + created_at: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, parameters: Optional[pulumi.Input[Sequence[pulumi.Input['SqlQueryParameterArgs']]]] = None, parent: Optional[pulumi.Input[str]] = None, run_as_role: Optional[pulumi.Input[str]] = None, schedule: Optional[pulumi.Input['SqlQueryScheduleArgs']] = None, - tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): + tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + updated_at: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a SqlQuery resource. 
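Likewise for `databricks.SqlDashboard`, an illustrative sketch of the new timestamp outputs; the dashboard name and tag are placeholders:

import pulumi
import pulumi_databricks as databricks

dashboard = databricks.SqlDashboard(
    "ops-dashboard",
    name="Operations",             # placeholder display name
    tags=["team:data-platform"],   # placeholder tag
)

pulumi.export("dashboardUpdatedAt", dashboard.updated_at)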
""" pulumi.set(__self__, "data_source_id", data_source_id) pulumi.set(__self__, "query", query) + if created_at is not None: + pulumi.set(__self__, "created_at", created_at) if description is not None: pulumi.set(__self__, "description", description) if name is not None: @@ -47,6 +51,8 @@ def __init__(__self__, *, pulumi.set(__self__, "schedule", schedule) if tags is not None: pulumi.set(__self__, "tags", tags) + if updated_at is not None: + pulumi.set(__self__, "updated_at", updated_at) @property @pulumi.getter(name="dataSourceId") @@ -66,6 +72,15 @@ def query(self) -> pulumi.Input[str]: def query(self, value: pulumi.Input[str]): pulumi.set(self, "query", value) + @property + @pulumi.getter(name="createdAt") + def created_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "created_at") + + @created_at.setter + def created_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "created_at", value) + @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: @@ -132,10 +147,20 @@ def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "tags", value) + @property + @pulumi.getter(name="updatedAt") + def updated_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "updated_at") + + @updated_at.setter + def updated_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "updated_at", value) + @pulumi.input_type class _SqlQueryState: def __init__(__self__, *, + created_at: Optional[pulumi.Input[str]] = None, data_source_id: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -144,10 +169,13 @@ def __init__(__self__, *, query: Optional[pulumi.Input[str]] = None, run_as_role: Optional[pulumi.Input[str]] = None, schedule: Optional[pulumi.Input['SqlQueryScheduleArgs']] = None, - tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): + tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + updated_at: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering SqlQuery resources. 
""" + if created_at is not None: + pulumi.set(__self__, "created_at", created_at) if data_source_id is not None: pulumi.set(__self__, "data_source_id", data_source_id) if description is not None: @@ -169,6 +197,17 @@ def __init__(__self__, *, pulumi.set(__self__, "schedule", schedule) if tags is not None: pulumi.set(__self__, "tags", tags) + if updated_at is not None: + pulumi.set(__self__, "updated_at", updated_at) + + @property + @pulumi.getter(name="createdAt") + def created_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "created_at") + + @created_at.setter + def created_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "created_at", value) @property @pulumi.getter(name="dataSourceId") @@ -254,12 +293,22 @@ def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "tags", value) + @property + @pulumi.getter(name="updatedAt") + def updated_at(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "updated_at") + + @updated_at.setter + def updated_at(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "updated_at", value) + class SqlQuery(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, + created_at: Optional[pulumi.Input[str]] = None, data_source_id: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -269,6 +318,7 @@ def __init__(__self__, run_as_role: Optional[pulumi.Input[str]] = None, schedule: Optional[pulumi.Input[pulumi.InputType['SqlQueryScheduleArgs']]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + updated_at: Optional[pulumi.Input[str]] = None, __props__=None): """ To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your Group or databricks_user. @@ -506,6 +556,7 @@ def __init__(__self__, resource_name: str, *args, **kwargs): def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, + created_at: Optional[pulumi.Input[str]] = None, data_source_id: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -515,6 +566,7 @@ def _internal_init(__self__, run_as_role: Optional[pulumi.Input[str]] = None, schedule: Optional[pulumi.Input[pulumi.InputType['SqlQueryScheduleArgs']]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + updated_at: Optional[pulumi.Input[str]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): @@ -524,6 +576,7 @@ def _internal_init(__self__, raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = SqlQueryArgs.__new__(SqlQueryArgs) + __props__.__dict__["created_at"] = created_at if data_source_id is None and not opts.urn: raise TypeError("Missing required property 'data_source_id'") __props__.__dict__["data_source_id"] = data_source_id @@ -540,6 +593,7 @@ def _internal_init(__self__, pulumi.log.warn("""schedule is deprecated: Operations on `databricks_sql_query` schedules are deprecated. 
Please use `databricks_job` resource to schedule a `sql_task`.""") __props__.__dict__["schedule"] = schedule __props__.__dict__["tags"] = tags + __props__.__dict__["updated_at"] = updated_at super(SqlQuery, __self__).__init__( 'databricks:index/sqlQuery:SqlQuery', resource_name, @@ -550,6 +604,7 @@ def _internal_init(__self__, def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, + created_at: Optional[pulumi.Input[str]] = None, data_source_id: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -558,7 +613,8 @@ def get(resource_name: str, query: Optional[pulumi.Input[str]] = None, run_as_role: Optional[pulumi.Input[str]] = None, schedule: Optional[pulumi.Input[pulumi.InputType['SqlQueryScheduleArgs']]] = None, - tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'SqlQuery': + tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + updated_at: Optional[pulumi.Input[str]] = None) -> 'SqlQuery': """ Get an existing SqlQuery resource's state with the given name, id, and optional extra properties used to qualify the lookup. @@ -571,6 +627,7 @@ def get(resource_name: str, __props__ = _SqlQueryState.__new__(_SqlQueryState) + __props__.__dict__["created_at"] = created_at __props__.__dict__["data_source_id"] = data_source_id __props__.__dict__["description"] = description __props__.__dict__["name"] = name @@ -580,8 +637,14 @@ def get(resource_name: str, __props__.__dict__["run_as_role"] = run_as_role __props__.__dict__["schedule"] = schedule __props__.__dict__["tags"] = tags + __props__.__dict__["updated_at"] = updated_at return SqlQuery(resource_name, opts=opts, __props__=__props__) + @property + @pulumi.getter(name="createdAt") + def created_at(self) -> pulumi.Output[str]: + return pulumi.get(self, "created_at") + @property @pulumi.getter(name="dataSourceId") def data_source_id(self) -> pulumi.Output[str]: @@ -630,3 +693,8 @@ def schedule(self) -> pulumi.Output[Optional['outputs.SqlQuerySchedule']]: def tags(self) -> pulumi.Output[Optional[Sequence[str]]]: return pulumi.get(self, "tags") + @property + @pulumi.getter(name="updatedAt") + def updated_at(self) -> pulumi.Output[str]: + return pulumi.get(self, "updated_at") + diff --git a/sdk/python/pulumi_databricks/storage_credential.py b/sdk/python/pulumi_databricks/storage_credential.py index 536ab2e4..579b698b 100644 --- a/sdk/python/pulumi_databricks/storage_credential.py +++ b/sdk/python/pulumi_databricks/storage_credential.py @@ -21,6 +21,7 @@ def __init__(__self__, *, azure_service_principal: Optional[pulumi.Input['StorageCredentialAzureServicePrincipalArgs']] = None, comment: Optional[pulumi.Input[str]] = None, databricks_gcp_service_account: Optional[pulumi.Input['StorageCredentialDatabricksGcpServiceAccountArgs']] = None, + force_destroy: Optional[pulumi.Input[bool]] = None, gcp_service_account_key: Optional[pulumi.Input['StorageCredentialGcpServiceAccountKeyArgs']] = None, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -44,6 +45,8 @@ def __init__(__self__, *, pulumi.set(__self__, "comment", comment) if databricks_gcp_service_account is not None: pulumi.set(__self__, "databricks_gcp_service_account", databricks_gcp_service_account) + if force_destroy is not None: + pulumi.set(__self__, "force_destroy", force_destroy) if gcp_service_account_key is not None: pulumi.set(__self__, "gcp_service_account_key", gcp_service_account_key) if metastore_id 
is not None: @@ -100,6 +103,15 @@ def databricks_gcp_service_account(self) -> Optional[pulumi.Input['StorageCreden def databricks_gcp_service_account(self, value: Optional[pulumi.Input['StorageCredentialDatabricksGcpServiceAccountArgs']]): pulumi.set(self, "databricks_gcp_service_account", value) + @property + @pulumi.getter(name="forceDestroy") + def force_destroy(self) -> Optional[pulumi.Input[bool]]: + return pulumi.get(self, "force_destroy") + + @force_destroy.setter + def force_destroy(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "force_destroy", value) + @property @pulumi.getter(name="gcpServiceAccountKey") def gcp_service_account_key(self) -> Optional[pulumi.Input['StorageCredentialGcpServiceAccountKeyArgs']]: @@ -165,6 +177,7 @@ def __init__(__self__, *, azure_service_principal: Optional[pulumi.Input['StorageCredentialAzureServicePrincipalArgs']] = None, comment: Optional[pulumi.Input[str]] = None, databricks_gcp_service_account: Optional[pulumi.Input['StorageCredentialDatabricksGcpServiceAccountArgs']] = None, + force_destroy: Optional[pulumi.Input[bool]] = None, gcp_service_account_key: Optional[pulumi.Input['StorageCredentialGcpServiceAccountKeyArgs']] = None, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -188,6 +201,8 @@ def __init__(__self__, *, pulumi.set(__self__, "comment", comment) if databricks_gcp_service_account is not None: pulumi.set(__self__, "databricks_gcp_service_account", databricks_gcp_service_account) + if force_destroy is not None: + pulumi.set(__self__, "force_destroy", force_destroy) if gcp_service_account_key is not None: pulumi.set(__self__, "gcp_service_account_key", gcp_service_account_key) if metastore_id is not None: @@ -244,6 +259,15 @@ def databricks_gcp_service_account(self) -> Optional[pulumi.Input['StorageCreden def databricks_gcp_service_account(self, value: Optional[pulumi.Input['StorageCredentialDatabricksGcpServiceAccountArgs']]): pulumi.set(self, "databricks_gcp_service_account", value) + @property + @pulumi.getter(name="forceDestroy") + def force_destroy(self) -> Optional[pulumi.Input[bool]]: + return pulumi.get(self, "force_destroy") + + @force_destroy.setter + def force_destroy(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "force_destroy", value) + @property @pulumi.getter(name="gcpServiceAccountKey") def gcp_service_account_key(self) -> Optional[pulumi.Input['StorageCredentialGcpServiceAccountKeyArgs']]: @@ -311,6 +335,7 @@ def __init__(__self__, azure_service_principal: Optional[pulumi.Input[pulumi.InputType['StorageCredentialAzureServicePrincipalArgs']]] = None, comment: Optional[pulumi.Input[str]] = None, databricks_gcp_service_account: Optional[pulumi.Input[pulumi.InputType['StorageCredentialDatabricksGcpServiceAccountArgs']]] = None, + force_destroy: Optional[pulumi.Input[bool]] = None, gcp_service_account_key: Optional[pulumi.Input[pulumi.InputType['StorageCredentialGcpServiceAccountKeyArgs']]] = None, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -489,6 +514,7 @@ def _internal_init(__self__, azure_service_principal: Optional[pulumi.Input[pulumi.InputType['StorageCredentialAzureServicePrincipalArgs']]] = None, comment: Optional[pulumi.Input[str]] = None, databricks_gcp_service_account: Optional[pulumi.Input[pulumi.InputType['StorageCredentialDatabricksGcpServiceAccountArgs']]] = None, + force_destroy: Optional[pulumi.Input[bool]] = None, gcp_service_account_key: 
Optional[pulumi.Input[pulumi.InputType['StorageCredentialGcpServiceAccountKeyArgs']]] = None, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -508,6 +534,7 @@ def _internal_init(__self__, __props__.__dict__["azure_service_principal"] = azure_service_principal __props__.__dict__["comment"] = comment __props__.__dict__["databricks_gcp_service_account"] = databricks_gcp_service_account + __props__.__dict__["force_destroy"] = force_destroy __props__.__dict__["gcp_service_account_key"] = gcp_service_account_key __props__.__dict__["metastore_id"] = metastore_id __props__.__dict__["name"] = name @@ -528,6 +555,7 @@ def get(resource_name: str, azure_service_principal: Optional[pulumi.Input[pulumi.InputType['StorageCredentialAzureServicePrincipalArgs']]] = None, comment: Optional[pulumi.Input[str]] = None, databricks_gcp_service_account: Optional[pulumi.Input[pulumi.InputType['StorageCredentialDatabricksGcpServiceAccountArgs']]] = None, + force_destroy: Optional[pulumi.Input[bool]] = None, gcp_service_account_key: Optional[pulumi.Input[pulumi.InputType['StorageCredentialGcpServiceAccountKeyArgs']]] = None, metastore_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -555,6 +583,7 @@ def get(resource_name: str, __props__.__dict__["azure_service_principal"] = azure_service_principal __props__.__dict__["comment"] = comment __props__.__dict__["databricks_gcp_service_account"] = databricks_gcp_service_account + __props__.__dict__["force_destroy"] = force_destroy __props__.__dict__["gcp_service_account_key"] = gcp_service_account_key __props__.__dict__["metastore_id"] = metastore_id __props__.__dict__["name"] = name @@ -587,6 +616,11 @@ def comment(self) -> pulumi.Output[Optional[str]]: def databricks_gcp_service_account(self) -> pulumi.Output['outputs.StorageCredentialDatabricksGcpServiceAccount']: return pulumi.get(self, "databricks_gcp_service_account") + @property + @pulumi.getter(name="forceDestroy") + def force_destroy(self) -> pulumi.Output[Optional[bool]]: + return pulumi.get(self, "force_destroy") + @property @pulumi.getter(name="gcpServiceAccountKey") def gcp_service_account_key(self) -> pulumi.Output[Optional['outputs.StorageCredentialGcpServiceAccountKey']]:
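An illustrative sketch of the new `force_destroy` flag on `databricks.StorageCredential`; the credential name and IAM role ARN are placeholders:

import pulumi_databricks as databricks

external = databricks.StorageCredential(
    "external",
    name="external-data-cred",
    aws_iam_role=databricks.StorageCredentialAwsIamRoleArgs(
        role_arn="arn:aws:iam::123456789012:role/uc-external-access",  # placeholder role ARN
    ),
    force_destroy=True,  # allow deletion even if the credential still has dependents
)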