diff --git a/provider/cmd/pulumi-resource-databricks/bridge-metadata.json b/provider/cmd/pulumi-resource-databricks/bridge-metadata.json index 0bb0ad3b..163fc083 100644 --- a/provider/cmd/pulumi-resource-databricks/bridge-metadata.json +++ b/provider/cmd/pulumi-resource-databricks/bridge-metadata.json @@ -546,6 +546,9 @@ "notification_settings": { "maxItemsOne": true }, + "parameter": { + "maxItemsOne": false + }, "pipeline_task": { "maxItemsOne": true }, @@ -565,6 +568,9 @@ "run_as": { "maxItemsOne": true }, + "run_job_task": { + "maxItemsOne": true + }, "schedule": { "maxItemsOne": true }, @@ -779,6 +785,9 @@ } } }, + "run_job_task": { + "maxItemsOne": true + }, "spark_jar_task": { "maxItemsOne": true, "elem": { @@ -2154,6 +2163,9 @@ "notification_settings": { "maxItemsOne": true }, + "parameter": { + "maxItemsOne": false + }, "pipeline_task": { "maxItemsOne": true }, @@ -2173,6 +2185,9 @@ "run_as": { "maxItemsOne": true }, + "run_job_task": { + "maxItemsOne": true + }, "schedule": { "maxItemsOne": true }, @@ -2387,6 +2402,9 @@ } } }, + "run_job_task": { + "maxItemsOne": true + }, "spark_jar_task": { "maxItemsOne": true, "elem": { @@ -2814,6 +2832,7 @@ "databricks:index/ClusterGcpAttributes:ClusterGcpAttributes": { "bootDiskSize": "boot_disk_size", "googleServiceAccount": "google_service_account", + "localSsdCount": "local_ssd_count", "usePreemptibleExecutors": "use_preemptible_executors", "zoneId": "zone_id" }, @@ -2840,7 +2859,8 @@ "ebsVolumeType": "ebs_volume_type" }, "databricks:index/InstancePoolGcpAttributes:InstancePoolGcpAttributes": { - "gcpAvailability": "gcp_availability" + "gcpAvailability": "gcp_availability", + "localSsdCount": "local_ssd_count" }, "databricks:index/InstancePoolInstancePoolFleetAttributes:InstancePoolInstancePoolFleetAttributes": { "fleetOnDemandOption": "fleet_on_demand_option", @@ -2962,6 +2982,7 @@ "databricks:index/JobJobClusterNewClusterGcpAttributes:JobJobClusterNewClusterGcpAttributes": { "bootDiskSize": "boot_disk_size", "googleServiceAccount": "google_service_account", + "localSsdCount": "local_ssd_count", "usePreemptibleExecutors": "use_preemptible_executors", "zoneId": "zone_id" }, @@ -3040,6 +3061,7 @@ "databricks:index/JobNewClusterGcpAttributes:JobNewClusterGcpAttributes": { "bootDiskSize": "boot_disk_size", "googleServiceAccount": "google_service_account", + "localSsdCount": "local_ssd_count", "usePreemptibleExecutors": "use_preemptible_executors", "zoneId": "zone_id" }, @@ -3070,6 +3092,10 @@ "servicePrincipalName": "service_principal_name", "userName": "user_name" }, + "databricks:index/JobRunJobTask:JobRunJobTask": { + "jobId": "job_id", + "jobParameters": "job_parameters" + }, "databricks:index/JobSchedule:JobSchedule": { "pauseStatus": "pause_status", "quartzCronExpression": "quartz_cron_expression", @@ -3100,6 +3126,7 @@ "pythonWheelTask": "python_wheel_task", "retryOnTimeout": "retry_on_timeout", "runIf": "run_if", + "runJobTask": "run_job_task", "sparkJarTask": "spark_jar_task", "sparkPythonTask": "spark_python_task", "sparkSubmitTask": "spark_submit_task", @@ -3192,6 +3219,7 @@ "databricks:index/JobTaskNewClusterGcpAttributes:JobTaskNewClusterGcpAttributes": { "bootDiskSize": "boot_disk_size", "googleServiceAccount": "google_service_account", + "localSsdCount": "local_ssd_count", "usePreemptibleExecutors": "use_preemptible_executors", "zoneId": "zone_id" }, @@ -3219,6 +3247,10 @@ "namedParameters": "named_parameters", "packageName": "package_name" }, + "databricks:index/JobTaskRunJobTask:JobTaskRunJobTask": { + "jobId": "job_id", + 
"jobParameters": "job_parameters" + }, "databricks:index/JobTaskSparkJarTask:JobTaskSparkJarTask": { "jarUri": "jar_uri", "mainClassName": "main_class_name" @@ -3292,6 +3324,7 @@ }, "databricks:index/ModelServingConfigServedModel:ModelServingConfigServedModel": { "environmentVars": "environment_vars", + "instanceProfileArn": "instance_profile_arn", "modelName": "model_name", "modelVersion": "model_version", "scaleToZeroEnabled": "scale_to_zero_enabled", @@ -3436,6 +3469,7 @@ }, "databricks:index/PipelineClusterGcpAttributes:PipelineClusterGcpAttributes": { "googleServiceAccount": "google_service_account", + "localSsdCount": "local_ssd_count", "zoneId": "zone_id" }, "databricks:index/PipelineClusterInitScriptS3:PipelineClusterInitScriptS3": { @@ -3742,6 +3776,7 @@ "databricks:index/getClusterClusterInfoGcpAttributes:getClusterClusterInfoGcpAttributes": { "bootDiskSize": "boot_disk_size", "googleServiceAccount": "google_service_account", + "localSsdCount": "local_ssd_count", "usePreemptibleExecutors": "use_preemptible_executors", "zoneId": "zone_id" }, @@ -3827,7 +3862,8 @@ "ebsVolumeType": "ebs_volume_type" }, "databricks:index/getInstancePoolPoolInfoGcpAttributes:getInstancePoolPoolInfoGcpAttributes": { - "gcpAvailability": "gcp_availability" + "gcpAvailability": "gcp_availability", + "localSsdCount": "local_ssd_count" }, "databricks:index/getInstancePoolPoolInfoInstancePoolFleetAttribute:getInstancePoolPoolInfoInstancePoolFleetAttribute": { "fleetOnDemandOption": "fleet_on_demand_option", @@ -3880,10 +3916,12 @@ "newCluster": "new_cluster", "notebookTask": "notebook_task", "notificationSettings": "notification_settings", + "parameters": "parameter", "pipelineTask": "pipeline_task", "pythonWheelTask": "python_wheel_task", "retryOnTimeout": "retry_on_timeout", "runAs": "run_as", + "runJobTask": "run_job_task", "sparkJarTask": "spark_jar_task", "sparkPythonTask": "spark_python_task", "sparkSubmitTask": "spark_submit_task", @@ -3991,6 +4029,7 @@ "databricks:index/getJobJobSettingsSettingsJobClusterNewClusterGcpAttributes:getJobJobSettingsSettingsJobClusterNewClusterGcpAttributes": { "bootDiskSize": "boot_disk_size", "googleServiceAccount": "google_service_account", + "localSsdCount": "local_ssd_count", "usePreemptibleExecutors": "use_preemptible_executors", "zoneId": "zone_id" }, @@ -4069,6 +4108,7 @@ "databricks:index/getJobJobSettingsSettingsNewClusterGcpAttributes:getJobJobSettingsSettingsNewClusterGcpAttributes": { "bootDiskSize": "boot_disk_size", "googleServiceAccount": "google_service_account", + "localSsdCount": "local_ssd_count", "usePreemptibleExecutors": "use_preemptible_executors", "zoneId": "zone_id" }, @@ -4099,6 +4139,10 @@ "servicePrincipalName": "service_principal_name", "userName": "user_name" }, + "databricks:index/getJobJobSettingsSettingsRunJobTask:getJobJobSettingsSettingsRunJobTask": { + "jobId": "job_id", + "jobParameters": "job_parameters" + }, "databricks:index/getJobJobSettingsSettingsSchedule:getJobJobSettingsSettingsSchedule": { "pauseStatus": "pause_status", "quartzCronExpression": "quartz_cron_expression", @@ -4129,6 +4173,7 @@ "pythonWheelTask": "python_wheel_task", "retryOnTimeout": "retry_on_timeout", "runIf": "run_if", + "runJobTask": "run_job_task", "sparkJarTask": "spark_jar_task", "sparkPythonTask": "spark_python_task", "sparkSubmitTask": "spark_submit_task", @@ -4221,6 +4266,7 @@ "databricks:index/getJobJobSettingsSettingsTaskNewClusterGcpAttributes:getJobJobSettingsSettingsTaskNewClusterGcpAttributes": { "bootDiskSize": "boot_disk_size", 
"googleServiceAccount": "google_service_account", + "localSsdCount": "local_ssd_count", "usePreemptibleExecutors": "use_preemptible_executors", "zoneId": "zone_id" }, @@ -4248,6 +4294,10 @@ "namedParameters": "named_parameters", "packageName": "package_name" }, + "databricks:index/getJobJobSettingsSettingsTaskRunJobTask:getJobJobSettingsSettingsTaskRunJobTask": { + "jobId": "job_id", + "jobParameters": "job_parameters" + }, "databricks:index/getJobJobSettingsSettingsTaskSparkJarTask:getJobJobSettingsSettingsTaskSparkJarTask": { "jarUri": "jar_uri", "mainClassName": "main_class_name" @@ -4487,10 +4537,12 @@ "newCluster": "new_cluster", "notebookTask": "notebook_task", "notificationSettings": "notification_settings", + "parameters": "parameter", "pipelineTask": "pipeline_task", "pythonWheelTask": "python_wheel_task", "retryOnTimeout": "retry_on_timeout", "runAs": "run_as", + "runJobTask": "run_job_task", "sparkJarTask": "spark_jar_task", "sparkPythonTask": "spark_python_task", "sparkSubmitTask": "spark_submit_task", diff --git a/provider/cmd/pulumi-resource-databricks/schema.json b/provider/cmd/pulumi-resource-databricks/schema.json index 2d57d6c2..e205f559 100644 --- a/provider/cmd/pulumi-resource-databricks/schema.json +++ b/provider/cmd/pulumi-resource-databricks/schema.json @@ -344,8 +344,12 @@ "googleServiceAccount": { "type": "string" }, + "localSsdCount": { + "type": "integer" + }, "usePreemptibleExecutors": { - "type": "boolean" + "type": "boolean", + "deprecationMessage": "Please use 'availability' instead." }, "zoneId": { "type": "string" @@ -590,7 +594,7 @@ "properties": { "availability": { "type": "string", - "description": "Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`.\n", + "description": "Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`.\n", "willReplaceOnChanges": true }, "spotBidMaxPrice": { @@ -634,7 +638,12 @@ "properties": { "gcpAvailability": { "type": "string", + "description": "Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`.\n", "willReplaceOnChanges": true + }, + "localSsdCount": { + "type": "integer", + "description": "Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.\n" } }, "type": "object" @@ -1256,6 +1265,9 @@ "googleServiceAccount": { "type": "string" }, + "localSsdCount": { + "type": "integer" + }, "usePreemptibleExecutors": { "type": "boolean" }, @@ -1762,6 +1774,9 @@ "googleServiceAccount": { "type": "string" }, + "localSsdCount": { + "type": "integer" + }, "usePreemptibleExecutors": { "type": "boolean" }, @@ -1920,7 +1935,20 @@ }, "noAlertForSkippedRuns": { "type": "boolean", - "description": "(Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block).\n" + "description": "(Bool) don't send alert for skipped runs.\n" + } + }, + "type": "object" + }, + "databricks:index/JobParameter:JobParameter": { + "properties": { + "default": { + "type": "string", + "description": "Default value of the parameter.\n" + }, + "name": { + "type": "string", + "description": "An optional name for the job. 
The default value is Untitled.\n" } }, "type": "object" @@ -1984,6 +2012,25 @@ }, "type": "object" }, + "databricks:index/JobRunJobTask:JobRunJobTask": { + "properties": { + "jobId": { + "type": "string", + "description": "(String) ID of the job\n" + }, + "jobParameters": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + }, + "description": "(Map) Job parameters for the task\n" + } + }, + "type": "object", + "required": [ + "jobId" + ] + }, "databricks:index/JobSchedule:JobSchedule": { "properties": { "pauseStatus": { @@ -2141,7 +2188,11 @@ "description": "(Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout.\n" }, "runIf": { - "type": "string" + "type": "string", + "description": "An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`.\n" + }, + "runJobTask": { + "$ref": "#/types/databricks:index/JobTaskRunJobTask:JobTaskRunJobTask" }, "sparkJarTask": { "$ref": "#/types/databricks:index/JobTaskSparkJarTask:JobTaskSparkJarTask" @@ -2690,6 +2741,9 @@ "googleServiceAccount": { "type": "string" }, + "localSsdCount": { + "type": "integer" + }, "usePreemptibleExecutors": { "type": "boolean" }, @@ -2852,7 +2906,7 @@ }, "noAlertForSkippedRuns": { "type": "boolean", - "description": "(Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block).\n" + "description": "(Bool) don't send alert for skipped runs.\n" } }, "type": "object" @@ -2900,6 +2954,25 @@ }, "type": "object" }, + "databricks:index/JobTaskRunJobTask:JobTaskRunJobTask": { + "properties": { + "jobId": { + "type": "string", + "description": "(String) ID of the job\n" + }, + "jobParameters": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + }, + "description": "(Map) Job parameters for the task\n" + } + }, + "type": "object", + "required": [ + "jobId" + ] + }, "databricks:index/JobTaskSparkJarTask:JobTaskSparkJarTask": { "properties": { "jarUri": { @@ -3452,6 +3525,9 @@ "$ref": "pulumi.json#/Any" } }, + "instanceProfileArn": { + "type": "string" + }, "modelName": { "type": "string", "description": "The name of the model in Databricks Model Registry to be served.\n" @@ -4200,6 +4276,9 @@ "googleServiceAccount": { "type": "string" }, + "localSsdCount": { + "type": "integer" + }, "zoneId": { "type": "string" } @@ -5660,6 +5739,9 @@ "googleServiceAccount": { "type": "string" }, + "localSsdCount": { + "type": "integer" + }, "usePreemptibleExecutors": { "type": "boolean" }, @@ -5943,6 +6025,9 @@ "properties": { "gcpAvailability": { "type": "string" + }, + "localSsdCount": { + "type": "integer" } }, "type": "object" @@ -6148,6 +6233,12 @@ "notificationSettings": { "$ref": "#/types/databricks:index/getJobJobSettingsSettingsNotificationSettings:getJobJobSettingsSettingsNotificationSettings" }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/types/databricks:index/getJobJobSettingsSettingsParameter:getJobJobSettingsSettingsParameter" + } + }, "pipelineTask": { "$ref": "#/types/databricks:index/getJobJobSettingsSettingsPipelineTask:getJobJobSettingsSettingsPipelineTask" }, @@ -6163,6 +6254,9 @@ "runAs": { "$ref": "#/types/databricks:index/getJobJobSettingsSettingsRunAs:getJobJobSettingsSettingsRunAs" }, + "runJobTask": { + "$ref": 
"#/types/databricks:index/getJobJobSettingsSettingsRunJobTask:getJobJobSettingsSettingsRunJobTask" + }, "schedule": { "$ref": "#/types/databricks:index/getJobJobSettingsSettingsSchedule:getJobJobSettingsSettingsSchedule" }, @@ -6697,6 +6791,9 @@ "googleServiceAccount": { "type": "string" }, + "localSsdCount": { + "type": "integer" + }, "usePreemptibleExecutors": { "type": "boolean" }, @@ -7202,6 +7299,9 @@ "googleServiceAccount": { "type": "string" }, + "localSsdCount": { + "type": "integer" + }, "usePreemptibleExecutors": { "type": "boolean" }, @@ -7359,6 +7459,18 @@ }, "type": "object" }, + "databricks:index/getJobJobSettingsSettingsParameter:getJobJobSettingsSettingsParameter": { + "properties": { + "default": { + "type": "string" + }, + "name": { + "type": "string", + "description": "the job name of databricks.Job if the resource was matched by id.\n" + } + }, + "type": "object" + }, "databricks:index/getJobJobSettingsSettingsPipelineTask:getJobJobSettingsSettingsPipelineTask": { "properties": { "fullRefresh": { @@ -7410,6 +7522,23 @@ }, "type": "object" }, + "databricks:index/getJobJobSettingsSettingsRunJobTask:getJobJobSettingsSettingsRunJobTask": { + "properties": { + "jobId": { + "type": "string" + }, + "jobParameters": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + } + } + }, + "type": "object", + "required": [ + "jobId" + ] + }, "databricks:index/getJobJobSettingsSettingsSchedule:getJobJobSettingsSettingsSchedule": { "properties": { "pauseStatus": { @@ -7550,6 +7679,9 @@ "runIf": { "type": "string" }, + "runJobTask": { + "$ref": "#/types/databricks:index/getJobJobSettingsSettingsTaskRunJobTask:getJobJobSettingsSettingsTaskRunJobTask" + }, "sparkJarTask": { "$ref": "#/types/databricks:index/getJobJobSettingsSettingsTaskSparkJarTask:getJobJobSettingsSettingsTaskSparkJarTask" }, @@ -8078,6 +8210,9 @@ "googleServiceAccount": { "type": "string" }, + "localSsdCount": { + "type": "integer" + }, "usePreemptibleExecutors": { "type": "boolean" }, @@ -8275,6 +8410,23 @@ }, "type": "object" }, + "databricks:index/getJobJobSettingsSettingsTaskRunJobTask:getJobJobSettingsSettingsTaskRunJobTask": { + "properties": { + "jobId": { + "type": "string" + }, + "jobParameters": { + "type": "object", + "additionalProperties": { + "$ref": "pulumi.json#/Any" + } + } + }, + "type": "object", + "required": [ + "jobId" + ] + }, "databricks:index/getJobJobSettingsSettingsTaskSparkJarTask:getJobJobSettingsSettingsTaskSparkJarTask": { "properties": { "jarUri": { @@ -11231,6 +11383,12 @@ "$ref": "#/types/databricks:index/JobNotificationSettings:JobNotificationSettings", "description": "An optional block controlling the notification settings on the job level (described below).\n" }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/types/databricks:index/JobParameter:JobParameter" + } + }, "pipelineTask": { "$ref": "#/types/databricks:index/JobPipelineTask:JobPipelineTask" }, @@ -11247,6 +11405,9 @@ "runAs": { "$ref": "#/types/databricks:index/JobRunAs:JobRunAs" }, + "runJobTask": { + "$ref": "#/types/databricks:index/JobRunJobTask:JobRunJobTask" + }, "schedule": { "$ref": "#/types/databricks:index/JobSchedule:JobSchedule", "description": "(List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. 
This field is a block and is documented below.\n" @@ -11373,6 +11534,12 @@ "$ref": "#/types/databricks:index/JobNotificationSettings:JobNotificationSettings", "description": "An optional block controlling the notification settings on the job level (described below).\n" }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/types/databricks:index/JobParameter:JobParameter" + } + }, "pipelineTask": { "$ref": "#/types/databricks:index/JobPipelineTask:JobPipelineTask" }, @@ -11389,6 +11556,9 @@ "runAs": { "$ref": "#/types/databricks:index/JobRunAs:JobRunAs" }, + "runJobTask": { + "$ref": "#/types/databricks:index/JobRunJobTask:JobRunJobTask" + }, "schedule": { "$ref": "#/types/databricks:index/JobSchedule:JobSchedule", "description": "(List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below.\n" @@ -11508,6 +11678,12 @@ "$ref": "#/types/databricks:index/JobNotificationSettings:JobNotificationSettings", "description": "An optional block controlling the notification settings on the job level (described below).\n" }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/types/databricks:index/JobParameter:JobParameter" + } + }, "pipelineTask": { "$ref": "#/types/databricks:index/JobPipelineTask:JobPipelineTask" }, @@ -11524,6 +11700,9 @@ "runAs": { "$ref": "#/types/databricks:index/JobRunAs:JobRunAs" }, + "runJobTask": { + "$ref": "#/types/databricks:index/JobRunJobTask:JobRunJobTask" + }, "schedule": { "$ref": "#/types/databricks:index/JobSchedule:JobSchedule", "description": "(List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. 
This field is a block and is documented below.\n" diff --git a/provider/go.mod b/provider/go.mod index 08cec748..bc698705 100644 --- a/provider/go.mod +++ b/provider/go.mod @@ -5,9 +5,9 @@ go 1.20 replace github.com/hashicorp/terraform-plugin-sdk/v2 => github.com/pulumi/terraform-plugin-sdk/v2 v2.0.0-20230710100801-03a71d0fca3d require ( - github.com/databricks/databricks-sdk-go v0.14.1 - github.com/databricks/terraform-provider-databricks v1.22.0 - github.com/pulumi/pulumi-terraform-bridge/v3 v3.55.0 + github.com/databricks/databricks-sdk-go v0.15.0 + github.com/databricks/terraform-provider-databricks v1.23.0 + github.com/pulumi/pulumi-terraform-bridge/v3 v3.56.2 ) require ( @@ -223,11 +223,11 @@ require ( golang.org/x/text v0.11.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.132.0 // indirect + google.golang.org/api v0.134.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 // indirect google.golang.org/grpc v1.56.2 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/provider/go.sum b/provider/go.sum index 986d676d..7f95a911 100644 --- a/provider/go.sum +++ b/provider/go.sum @@ -1137,10 +1137,10 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/databricks/databricks-sdk-go v0.14.1 h1:s9x18c2i6XbJxem6zKdTrrwEUXQX/Nzn0iVM+qGlRus= -github.com/databricks/databricks-sdk-go v0.14.1/go.mod h1:0iuEtPIoD6oqw7OuFbPskhlEryt2FPH+Ies1UYiiDy8= -github.com/databricks/terraform-provider-databricks v1.22.0 h1:kuP5/PilkG9aMyyuM3QQHmTyQ6PvOk/Y2w+xYXeU2DE= -github.com/databricks/terraform-provider-databricks v1.22.0/go.mod h1:raNj9LLxigx7Z13jZaYGBEB+CXa7BIpccxqyG/hTzSY= +github.com/databricks/databricks-sdk-go v0.15.0 h1:xnJvrEklzSqclFqc+YN4tknfT34dCN6bRdusQDqZOqw= +github.com/databricks/databricks-sdk-go v0.15.0/go.mod h1:Xupq4Bo+/NHWvrMOKQkCvafZYgXgKGANcVLCzMS/OSE= +github.com/databricks/terraform-provider-databricks v1.23.0 h1:8IaphOmfdxVVxHAuSxYP95PSMm/tyB37Oj/W/hSTykM= +github.com/databricks/terraform-provider-databricks v1.23.0/go.mod h1:XhIE+XC41AHPQV8SF+lc6vGPfvar19sWn8pY4+l+nzM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -2310,8 +2310,8 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/pulumi/pulumi-java/pkg v0.9.4 h1:gIQZmlUI1o9ye8CL2XFqtmAX6Lwr9uj/+HzjboiSmK4= github.com/pulumi/pulumi-java/pkg v0.9.4/go.mod h1:c6rSw/+q4O0IImgJ9axxoC6QesbPYWBaG5gimbHouUQ= github.com/pulumi/pulumi-terraform-bridge/testing v0.0.1 h1:SCg1gjfY9N4yn8U8peIUYATifjoDABkyR7H9lmefsfc= 
-github.com/pulumi/pulumi-terraform-bridge/v3 v3.55.0 h1:A33Ji/QSCYy2Jk5+1BzA5vFmK7Rvq6XFo8jS69QahVo= -github.com/pulumi/pulumi-terraform-bridge/v3 v3.55.0/go.mod h1:ykaml8e6XS/yI9JOcNZ+6gLirs6EWTB0FmjbT+JyEdU= +github.com/pulumi/pulumi-terraform-bridge/v3 v3.56.2 h1:NY9kPxzquV8rW/YYYlu0o7LLF/NmfUGEY/uZ06h/CMw= +github.com/pulumi/pulumi-terraform-bridge/v3 v3.56.2/go.mod h1:ykaml8e6XS/yI9JOcNZ+6gLirs6EWTB0FmjbT+JyEdU= github.com/pulumi/pulumi-terraform-bridge/x/muxer v0.0.4 h1:rIzMmtcVpPX8ynaz6/nW5AHNY63DiNfCohqmxWvMpM4= github.com/pulumi/pulumi-terraform-bridge/x/muxer v0.0.4/go.mod h1:Kt8RIZWa/N8rW3+0g6NrqCBmF3o+HuIhFaZpssEkG6w= github.com/pulumi/pulumi-yaml v1.1.1 h1:8pyBNIU8+ym0wYpjhsCqN+cutygfK1XbhY2YEeNfyXY= @@ -3380,8 +3380,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc= -google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0= +google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw= +google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3549,8 +3549,8 @@ google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIY google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU= google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 h1:Z8qdAF9GFsmcUuWQ5KVYIpP3PCKydn/YKORnghIalu4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= diff --git a/sdk/dotnet/Inputs/ClusterGcpAttributesArgs.cs b/sdk/dotnet/Inputs/ClusterGcpAttributesArgs.cs index 28e0e1fb..b4488f85 100644 --- a/sdk/dotnet/Inputs/ClusterGcpAttributesArgs.cs +++ b/sdk/dotnet/Inputs/ClusterGcpAttributesArgs.cs @@ -21,6 +21,9 @@ public sealed class ClusterGcpAttributesArgs : global::Pulumi.ResourceArgs [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? 
UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/ClusterGcpAttributesGetArgs.cs b/sdk/dotnet/Inputs/ClusterGcpAttributesGetArgs.cs index cf7b1e3d..7fd31cf7 100644 --- a/sdk/dotnet/Inputs/ClusterGcpAttributesGetArgs.cs +++ b/sdk/dotnet/Inputs/ClusterGcpAttributesGetArgs.cs @@ -21,6 +21,9 @@ public sealed class ClusterGcpAttributesGetArgs : global::Pulumi.ResourceArgs [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/GetClusterClusterInfoGcpAttributes.cs b/sdk/dotnet/Inputs/GetClusterClusterInfoGcpAttributes.cs index fdb7f706..d93b883a 100644 --- a/sdk/dotnet/Inputs/GetClusterClusterInfoGcpAttributes.cs +++ b/sdk/dotnet/Inputs/GetClusterClusterInfoGcpAttributes.cs @@ -21,6 +21,9 @@ public sealed class GetClusterClusterInfoGcpAttributesArgs : global::Pulumi.Invo [Input("googleServiceAccount")] public string? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public int? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public bool? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/GetClusterClusterInfoGcpAttributesArgs.cs b/sdk/dotnet/Inputs/GetClusterClusterInfoGcpAttributesArgs.cs index 1cec6846..c4e8f782 100644 --- a/sdk/dotnet/Inputs/GetClusterClusterInfoGcpAttributesArgs.cs +++ b/sdk/dotnet/Inputs/GetClusterClusterInfoGcpAttributesArgs.cs @@ -21,6 +21,9 @@ public sealed class GetClusterClusterInfoGcpAttributesInputArgs : global::Pulumi [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/GetInstancePoolPoolInfoGcpAttributes.cs b/sdk/dotnet/Inputs/GetInstancePoolPoolInfoGcpAttributes.cs index 556f5fd8..8022623f 100644 --- a/sdk/dotnet/Inputs/GetInstancePoolPoolInfoGcpAttributes.cs +++ b/sdk/dotnet/Inputs/GetInstancePoolPoolInfoGcpAttributes.cs @@ -15,6 +15,9 @@ public sealed class GetInstancePoolPoolInfoGcpAttributesArgs : global::Pulumi.In [Input("gcpAvailability")] public string? GcpAvailability { get; set; } + [Input("localSsdCount")] + public int? LocalSsdCount { get; set; } + public GetInstancePoolPoolInfoGcpAttributesArgs() { } diff --git a/sdk/dotnet/Inputs/GetInstancePoolPoolInfoGcpAttributesArgs.cs b/sdk/dotnet/Inputs/GetInstancePoolPoolInfoGcpAttributesArgs.cs index 0d73e8b9..ca817ec7 100644 --- a/sdk/dotnet/Inputs/GetInstancePoolPoolInfoGcpAttributesArgs.cs +++ b/sdk/dotnet/Inputs/GetInstancePoolPoolInfoGcpAttributesArgs.cs @@ -15,6 +15,9 @@ public sealed class GetInstancePoolPoolInfoGcpAttributesInputArgs : global::Pulu [Input("gcpAvailability")] public Input? GcpAvailability { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + public GetInstancePoolPoolInfoGcpAttributesInputArgs() { } diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettings.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettings.cs index f7b67f3b..cf74a54d 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettings.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettings.cs @@ -81,6 +81,14 @@ public List Libraries [Input("notificationSettings")] public Inputs.GetJobJobSettingsSettingsNotificationSettingsArgs? NotificationSettings { get; set; } + [Input("parameters")] + private List? 
_parameters; + public List Parameters + { + get => _parameters ?? (_parameters = new List()); + set => _parameters = value; + } + [Input("pipelineTask")] public Inputs.GetJobJobSettingsSettingsPipelineTaskArgs? PipelineTask { get; set; } @@ -96,6 +104,9 @@ public List Libraries [Input("runAs")] public Inputs.GetJobJobSettingsSettingsRunAsArgs? RunAs { get; set; } + [Input("runJobTask")] + public Inputs.GetJobJobSettingsSettingsRunJobTaskArgs? RunJobTask { get; set; } + [Input("schedule")] public Inputs.GetJobJobSettingsSettingsScheduleArgs? Schedule { get; set; } diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsArgs.cs index 76f25ee8..2897a57c 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsArgs.cs @@ -81,6 +81,14 @@ public InputList Libraries [Input("notificationSettings")] public Input? NotificationSettings { get; set; } + [Input("parameters")] + private InputList? _parameters; + public InputList Parameters + { + get => _parameters ?? (_parameters = new InputList()); + set => _parameters = value; + } + [Input("pipelineTask")] public Input? PipelineTask { get; set; } @@ -96,6 +104,9 @@ public InputList Libraries [Input("runAs")] public Input? RunAs { get; set; } + [Input("runJobTask")] + public Input? RunJobTask { get; set; } + [Input("schedule")] public Input? Schedule { get; set; } diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.cs index a2143663..72f51874 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.cs @@ -21,6 +21,9 @@ public sealed class GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesAr [Input("googleServiceAccount")] public string? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public int? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public bool? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesArgs.cs index 61bfe1a6..fe1bb413 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesArgs.cs @@ -21,6 +21,9 @@ public sealed class GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesIn [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.cs index 66207eaa..cf4df0e8 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.cs @@ -21,6 +21,9 @@ public sealed class GetJobJobSettingsSettingsNewClusterGcpAttributesArgs : globa [Input("googleServiceAccount")] public string? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public int? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public bool? 
UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsNewClusterGcpAttributesArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsNewClusterGcpAttributesArgs.cs index 45e85f7b..7fe5fcf3 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsNewClusterGcpAttributesArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsNewClusterGcpAttributesArgs.cs @@ -21,6 +21,9 @@ public sealed class GetJobJobSettingsSettingsNewClusterGcpAttributesInputArgs : [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsParameter.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsParameter.cs new file mode 100644 index 00000000..b50de933 --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsParameter.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsParameterArgs : global::Pulumi.InvokeArgs + { + [Input("default")] + public string? Default { get; set; } + + /// + /// the job name of databricks.Job if the resource was matched by id. + /// + [Input("name")] + public string? Name { get; set; } + + public GetJobJobSettingsSettingsParameterArgs() + { + } + public static new GetJobJobSettingsSettingsParameterArgs Empty => new GetJobJobSettingsSettingsParameterArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsParameterArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsParameterArgs.cs new file mode 100644 index 00000000..bda82c24 --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsParameterArgs.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsParameterInputArgs : global::Pulumi.ResourceArgs + { + [Input("default")] + public Input? Default { get; set; } + + /// + /// the job name of databricks.Job if the resource was matched by id. + /// + [Input("name")] + public Input? Name { get; set; } + + public GetJobJobSettingsSettingsParameterInputArgs() + { + } + public static new GetJobJobSettingsSettingsParameterInputArgs Empty => new GetJobJobSettingsSettingsParameterInputArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTask.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTask.cs new file mode 100644 index 00000000..5bb1485c --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTask.cs @@ -0,0 +1,31 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsRunJobTaskArgs : global::Pulumi.InvokeArgs + { + [Input("jobId", required: true)] + public string JobId { get; set; } = null!; + + [Input("jobParameters")] + private Dictionary? _jobParameters; + public Dictionary JobParameters + { + get => _jobParameters ?? (_jobParameters = new Dictionary()); + set => _jobParameters = value; + } + + public GetJobJobSettingsSettingsRunJobTaskArgs() + { + } + public static new GetJobJobSettingsSettingsRunJobTaskArgs Empty => new GetJobJobSettingsSettingsRunJobTaskArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTaskArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTaskArgs.cs new file mode 100644 index 00000000..84c9bb4c --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsRunJobTaskArgs.cs @@ -0,0 +1,31 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsRunJobTaskInputArgs : global::Pulumi.ResourceArgs + { + [Input("jobId", required: true)] + public Input JobId { get; set; } = null!; + + [Input("jobParameters")] + private InputMap? _jobParameters; + public InputMap JobParameters + { + get => _jobParameters ?? (_jobParameters = new InputMap()); + set => _jobParameters = value; + } + + public GetJobJobSettingsSettingsRunJobTaskInputArgs() + { + } + public static new GetJobJobSettingsSettingsRunJobTaskInputArgs Empty => new GetJobJobSettingsSettingsRunJobTaskInputArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTask.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTask.cs index 2c1a8b40..93b7c902 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTask.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTask.cs @@ -79,6 +79,9 @@ public List Libraries [Input("runIf")] public string? RunIf { get; set; } + [Input("runJobTask")] + public Inputs.GetJobJobSettingsSettingsTaskRunJobTaskArgs? RunJobTask { get; set; } + [Input("sparkJarTask")] public Inputs.GetJobJobSettingsSettingsTaskSparkJarTaskArgs? SparkJarTask { get; set; } diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskArgs.cs index 1315c860..7dc4745b 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskArgs.cs @@ -79,6 +79,9 @@ public InputList Libraries [Input("runIf")] public Input? RunIf { get; set; } + [Input("runJobTask")] + public Input? RunJobTask { get; set; } + [Input("sparkJarTask")] public Input? 
SparkJarTask { get; set; } diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.cs index 6f6412ca..8f97c5cf 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.cs @@ -21,6 +21,9 @@ public sealed class GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs : g [Input("googleServiceAccount")] public string? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public int? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public bool? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs.cs index 7558de55..a9aafec4 100644 --- a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs.cs +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs.cs @@ -21,6 +21,9 @@ public sealed class GetJobJobSettingsSettingsTaskNewClusterGcpAttributesInputArg [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTask.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTask.cs new file mode 100644 index 00000000..f3858422 --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTask.cs @@ -0,0 +1,31 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsTaskRunJobTaskArgs : global::Pulumi.InvokeArgs + { + [Input("jobId", required: true)] + public string JobId { get; set; } = null!; + + [Input("jobParameters")] + private Dictionary? _jobParameters; + public Dictionary JobParameters + { + get => _jobParameters ?? (_jobParameters = new Dictionary()); + set => _jobParameters = value; + } + + public GetJobJobSettingsSettingsTaskRunJobTaskArgs() + { + } + public static new GetJobJobSettingsSettingsTaskRunJobTaskArgs Empty => new GetJobJobSettingsSettingsTaskRunJobTaskArgs(); + } +} diff --git a/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.cs b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.cs new file mode 100644 index 00000000..bc01109a --- /dev/null +++ b/sdk/dotnet/Inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.cs @@ -0,0 +1,31 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class GetJobJobSettingsSettingsTaskRunJobTaskInputArgs : global::Pulumi.ResourceArgs + { + [Input("jobId", required: true)] + public Input JobId { get; set; } = null!; + + [Input("jobParameters")] + private InputMap? 
_jobParameters; + public InputMap JobParameters + { + get => _jobParameters ?? (_jobParameters = new InputMap()); + set => _jobParameters = value; + } + + public GetJobJobSettingsSettingsTaskRunJobTaskInputArgs() + { + } + public static new GetJobJobSettingsSettingsTaskRunJobTaskInputArgs Empty => new GetJobJobSettingsSettingsTaskRunJobTaskInputArgs(); + } +} diff --git a/sdk/dotnet/Inputs/InstancePoolAzureAttributesArgs.cs b/sdk/dotnet/Inputs/InstancePoolAzureAttributesArgs.cs index 83f36992..76db8513 100644 --- a/sdk/dotnet/Inputs/InstancePoolAzureAttributesArgs.cs +++ b/sdk/dotnet/Inputs/InstancePoolAzureAttributesArgs.cs @@ -13,7 +13,7 @@ namespace Pulumi.Databricks.Inputs public sealed class InstancePoolAzureAttributesArgs : global::Pulumi.ResourceArgs { /// - /// Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + /// Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. /// [Input("availability")] public Input? Availability { get; set; } diff --git a/sdk/dotnet/Inputs/InstancePoolAzureAttributesGetArgs.cs b/sdk/dotnet/Inputs/InstancePoolAzureAttributesGetArgs.cs index c3220979..bd429f94 100644 --- a/sdk/dotnet/Inputs/InstancePoolAzureAttributesGetArgs.cs +++ b/sdk/dotnet/Inputs/InstancePoolAzureAttributesGetArgs.cs @@ -13,7 +13,7 @@ namespace Pulumi.Databricks.Inputs public sealed class InstancePoolAzureAttributesGetArgs : global::Pulumi.ResourceArgs { /// - /// Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + /// Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. /// [Input("availability")] public Input? Availability { get; set; } diff --git a/sdk/dotnet/Inputs/InstancePoolGcpAttributesArgs.cs b/sdk/dotnet/Inputs/InstancePoolGcpAttributesArgs.cs index 4fab410a..608daa44 100644 --- a/sdk/dotnet/Inputs/InstancePoolGcpAttributesArgs.cs +++ b/sdk/dotnet/Inputs/InstancePoolGcpAttributesArgs.cs @@ -12,9 +12,18 @@ namespace Pulumi.Databricks.Inputs public sealed class InstancePoolGcpAttributesArgs : global::Pulumi.ResourceArgs { + /// + /// Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + /// [Input("gcpAvailability")] public Input? GcpAvailability { get; set; } + /// + /// Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + /// + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + public InstancePoolGcpAttributesArgs() { } diff --git a/sdk/dotnet/Inputs/InstancePoolGcpAttributesGetArgs.cs b/sdk/dotnet/Inputs/InstancePoolGcpAttributesGetArgs.cs index c80f2d23..2d56d75a 100644 --- a/sdk/dotnet/Inputs/InstancePoolGcpAttributesGetArgs.cs +++ b/sdk/dotnet/Inputs/InstancePoolGcpAttributesGetArgs.cs @@ -12,9 +12,18 @@ namespace Pulumi.Databricks.Inputs public sealed class InstancePoolGcpAttributesGetArgs : global::Pulumi.ResourceArgs { + /// + /// Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + /// [Input("gcpAvailability")] public Input? GcpAvailability { get; set; } + /// + /// Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + /// + [Input("localSsdCount")] + public Input? 
LocalSsdCount { get; set; } + public InstancePoolGcpAttributesGetArgs() { } diff --git a/sdk/dotnet/Inputs/JobJobClusterNewClusterGcpAttributesArgs.cs b/sdk/dotnet/Inputs/JobJobClusterNewClusterGcpAttributesArgs.cs index 48b61cf5..871ce1d3 100644 --- a/sdk/dotnet/Inputs/JobJobClusterNewClusterGcpAttributesArgs.cs +++ b/sdk/dotnet/Inputs/JobJobClusterNewClusterGcpAttributesArgs.cs @@ -21,6 +21,9 @@ public sealed class JobJobClusterNewClusterGcpAttributesArgs : global::Pulumi.Re [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/JobJobClusterNewClusterGcpAttributesGetArgs.cs b/sdk/dotnet/Inputs/JobJobClusterNewClusterGcpAttributesGetArgs.cs index 8f73402f..69603130 100644 --- a/sdk/dotnet/Inputs/JobJobClusterNewClusterGcpAttributesGetArgs.cs +++ b/sdk/dotnet/Inputs/JobJobClusterNewClusterGcpAttributesGetArgs.cs @@ -21,6 +21,9 @@ public sealed class JobJobClusterNewClusterGcpAttributesGetArgs : global::Pulumi [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/JobNewClusterGcpAttributesArgs.cs b/sdk/dotnet/Inputs/JobNewClusterGcpAttributesArgs.cs index dbb64c85..7fc46d44 100644 --- a/sdk/dotnet/Inputs/JobNewClusterGcpAttributesArgs.cs +++ b/sdk/dotnet/Inputs/JobNewClusterGcpAttributesArgs.cs @@ -21,6 +21,9 @@ public sealed class JobNewClusterGcpAttributesArgs : global::Pulumi.ResourceArgs [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/JobNewClusterGcpAttributesGetArgs.cs b/sdk/dotnet/Inputs/JobNewClusterGcpAttributesGetArgs.cs index a2c9809f..3414740c 100644 --- a/sdk/dotnet/Inputs/JobNewClusterGcpAttributesGetArgs.cs +++ b/sdk/dotnet/Inputs/JobNewClusterGcpAttributesGetArgs.cs @@ -21,6 +21,9 @@ public sealed class JobNewClusterGcpAttributesGetArgs : global::Pulumi.ResourceA [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/JobNotificationSettingsArgs.cs b/sdk/dotnet/Inputs/JobNotificationSettingsArgs.cs index b83339d4..ee80abbb 100644 --- a/sdk/dotnet/Inputs/JobNotificationSettingsArgs.cs +++ b/sdk/dotnet/Inputs/JobNotificationSettingsArgs.cs @@ -19,7 +19,7 @@ public sealed class JobNotificationSettingsArgs : global::Pulumi.ResourceArgs public Input? NoAlertForCanceledRuns { get; set; } /// - /// (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + /// (Bool) don't send alert for skipped runs. /// [Input("noAlertForSkippedRuns")] public Input? 
NoAlertForSkippedRuns { get; set; } diff --git a/sdk/dotnet/Inputs/JobNotificationSettingsGetArgs.cs b/sdk/dotnet/Inputs/JobNotificationSettingsGetArgs.cs index 133f5c85..b3504e36 100644 --- a/sdk/dotnet/Inputs/JobNotificationSettingsGetArgs.cs +++ b/sdk/dotnet/Inputs/JobNotificationSettingsGetArgs.cs @@ -19,7 +19,7 @@ public sealed class JobNotificationSettingsGetArgs : global::Pulumi.ResourceArgs public Input? NoAlertForCanceledRuns { get; set; } /// - /// (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + /// (Bool) don't send alert for skipped runs. /// [Input("noAlertForSkippedRuns")] public Input? NoAlertForSkippedRuns { get; set; } diff --git a/sdk/dotnet/Inputs/JobParameterArgs.cs b/sdk/dotnet/Inputs/JobParameterArgs.cs new file mode 100644 index 00000000..105020e1 --- /dev/null +++ b/sdk/dotnet/Inputs/JobParameterArgs.cs @@ -0,0 +1,32 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobParameterArgs : global::Pulumi.ResourceArgs + { + /// + /// Default value of the parameter. + /// + [Input("default")] + public Input? Default { get; set; } + + /// + /// An optional name for the job. The default value is Untitled. + /// + [Input("name")] + public Input? Name { get; set; } + + public JobParameterArgs() + { + } + public static new JobParameterArgs Empty => new JobParameterArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobParameterGetArgs.cs b/sdk/dotnet/Inputs/JobParameterGetArgs.cs new file mode 100644 index 00000000..9b7bcb28 --- /dev/null +++ b/sdk/dotnet/Inputs/JobParameterGetArgs.cs @@ -0,0 +1,32 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobParameterGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Default value of the parameter. + /// + [Input("default")] + public Input? Default { get; set; } + + /// + /// An optional name for the job. The default value is Untitled. + /// + [Input("name")] + public Input? Name { get; set; } + + public JobParameterGetArgs() + { + } + public static new JobParameterGetArgs Empty => new JobParameterGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobRunJobTaskArgs.cs b/sdk/dotnet/Inputs/JobRunJobTaskArgs.cs new file mode 100644 index 00000000..6ea74a8b --- /dev/null +++ b/sdk/dotnet/Inputs/JobRunJobTaskArgs.cs @@ -0,0 +1,38 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobRunJobTaskArgs : global::Pulumi.ResourceArgs + { + /// + /// (String) ID of the job + /// + [Input("jobId", required: true)] + public Input JobId { get; set; } = null!; + + [Input("jobParameters")] + private InputMap? _jobParameters; + + /// + /// (Map) Job parameters for the task + /// + public InputMap JobParameters + { + get => _jobParameters ?? (_jobParameters = new InputMap()); + set => _jobParameters = value; + } + + public JobRunJobTaskArgs() + { + } + public static new JobRunJobTaskArgs Empty => new JobRunJobTaskArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobRunJobTaskGetArgs.cs b/sdk/dotnet/Inputs/JobRunJobTaskGetArgs.cs new file mode 100644 index 00000000..3fd98e39 --- /dev/null +++ b/sdk/dotnet/Inputs/JobRunJobTaskGetArgs.cs @@ -0,0 +1,38 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobRunJobTaskGetArgs : global::Pulumi.ResourceArgs + { + /// + /// (String) ID of the job + /// + [Input("jobId", required: true)] + public Input JobId { get; set; } = null!; + + [Input("jobParameters")] + private InputMap? _jobParameters; + + /// + /// (Map) Job parameters for the task + /// + public InputMap JobParameters + { + get => _jobParameters ?? (_jobParameters = new InputMap()); + set => _jobParameters = value; + } + + public JobRunJobTaskGetArgs() + { + } + public static new JobRunJobTaskGetArgs Empty => new JobRunJobTaskGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobTaskArgs.cs b/sdk/dotnet/Inputs/JobTaskArgs.cs index 30cdc55f..114d569f 100644 --- a/sdk/dotnet/Inputs/JobTaskArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskArgs.cs @@ -108,9 +108,15 @@ public InputList Libraries [Input("retryOnTimeout")] public Input? RetryOnTimeout { get; set; } + /// + /// An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + /// [Input("runIf")] public Input? RunIf { get; set; } + [Input("runJobTask")] + public Input? RunJobTask { get; set; } + [Input("sparkJarTask")] public Input? SparkJarTask { get; set; } diff --git a/sdk/dotnet/Inputs/JobTaskGetArgs.cs b/sdk/dotnet/Inputs/JobTaskGetArgs.cs index 74940809..1143ea7e 100644 --- a/sdk/dotnet/Inputs/JobTaskGetArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskGetArgs.cs @@ -108,9 +108,15 @@ public InputList Libraries [Input("retryOnTimeout")] public Input? RetryOnTimeout { get; set; } + /// + /// An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + /// [Input("runIf")] public Input? RunIf { get; set; } + [Input("runJobTask")] + public Input? RunJobTask { get; set; } + [Input("sparkJarTask")] public Input? 
SparkJarTask { get; set; } diff --git a/sdk/dotnet/Inputs/JobTaskNewClusterGcpAttributesArgs.cs b/sdk/dotnet/Inputs/JobTaskNewClusterGcpAttributesArgs.cs index f34e0bd3..3bd5976f 100644 --- a/sdk/dotnet/Inputs/JobTaskNewClusterGcpAttributesArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskNewClusterGcpAttributesArgs.cs @@ -21,6 +21,9 @@ public sealed class JobTaskNewClusterGcpAttributesArgs : global::Pulumi.Resource [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/JobTaskNewClusterGcpAttributesGetArgs.cs b/sdk/dotnet/Inputs/JobTaskNewClusterGcpAttributesGetArgs.cs index 485d584b..80bd0945 100644 --- a/sdk/dotnet/Inputs/JobTaskNewClusterGcpAttributesGetArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskNewClusterGcpAttributesGetArgs.cs @@ -21,6 +21,9 @@ public sealed class JobTaskNewClusterGcpAttributesGetArgs : global::Pulumi.Resou [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("usePreemptibleExecutors")] public Input? UsePreemptibleExecutors { get; set; } diff --git a/sdk/dotnet/Inputs/JobTaskNotificationSettingsArgs.cs b/sdk/dotnet/Inputs/JobTaskNotificationSettingsArgs.cs index 54f25687..08e065b0 100644 --- a/sdk/dotnet/Inputs/JobTaskNotificationSettingsArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskNotificationSettingsArgs.cs @@ -25,7 +25,7 @@ public sealed class JobTaskNotificationSettingsArgs : global::Pulumi.ResourceArg public Input? NoAlertForCanceledRuns { get; set; } /// - /// (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + /// (Bool) don't send alert for skipped runs. /// [Input("noAlertForSkippedRuns")] public Input? NoAlertForSkippedRuns { get; set; } diff --git a/sdk/dotnet/Inputs/JobTaskNotificationSettingsGetArgs.cs b/sdk/dotnet/Inputs/JobTaskNotificationSettingsGetArgs.cs index c2ceb3d7..7dacb042 100644 --- a/sdk/dotnet/Inputs/JobTaskNotificationSettingsGetArgs.cs +++ b/sdk/dotnet/Inputs/JobTaskNotificationSettingsGetArgs.cs @@ -25,7 +25,7 @@ public sealed class JobTaskNotificationSettingsGetArgs : global::Pulumi.Resource public Input? NoAlertForCanceledRuns { get; set; } /// - /// (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + /// (Bool) don't send alert for skipped runs. /// [Input("noAlertForSkippedRuns")] public Input? NoAlertForSkippedRuns { get; set; } diff --git a/sdk/dotnet/Inputs/JobTaskRunJobTaskArgs.cs b/sdk/dotnet/Inputs/JobTaskRunJobTaskArgs.cs new file mode 100644 index 00000000..8972ed67 --- /dev/null +++ b/sdk/dotnet/Inputs/JobTaskRunJobTaskArgs.cs @@ -0,0 +1,38 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobTaskRunJobTaskArgs : global::Pulumi.ResourceArgs + { + /// + /// (String) ID of the job + /// + [Input("jobId", required: true)] + public Input JobId { get; set; } = null!; + + [Input("jobParameters")] + private InputMap? _jobParameters; + + /// + /// (Map) Job parameters for the task + /// + public InputMap JobParameters + { + get => _jobParameters ?? (_jobParameters = new InputMap()); + set => _jobParameters = value; + } + + public JobTaskRunJobTaskArgs() + { + } + public static new JobTaskRunJobTaskArgs Empty => new JobTaskRunJobTaskArgs(); + } +} diff --git a/sdk/dotnet/Inputs/JobTaskRunJobTaskGetArgs.cs b/sdk/dotnet/Inputs/JobTaskRunJobTaskGetArgs.cs new file mode 100644 index 00000000..4340862a --- /dev/null +++ b/sdk/dotnet/Inputs/JobTaskRunJobTaskGetArgs.cs @@ -0,0 +1,38 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Inputs +{ + + public sealed class JobTaskRunJobTaskGetArgs : global::Pulumi.ResourceArgs + { + /// + /// (String) ID of the job + /// + [Input("jobId", required: true)] + public Input JobId { get; set; } = null!; + + [Input("jobParameters")] + private InputMap? _jobParameters; + + /// + /// (Map) Job parameters for the task + /// + public InputMap JobParameters + { + get => _jobParameters ?? (_jobParameters = new InputMap()); + set => _jobParameters = value; + } + + public JobTaskRunJobTaskGetArgs() + { + } + public static new JobTaskRunJobTaskGetArgs Empty => new JobTaskRunJobTaskGetArgs(); + } +} diff --git a/sdk/dotnet/Inputs/ModelServingConfigServedModelArgs.cs b/sdk/dotnet/Inputs/ModelServingConfigServedModelArgs.cs index 20986367..d4681788 100644 --- a/sdk/dotnet/Inputs/ModelServingConfigServedModelArgs.cs +++ b/sdk/dotnet/Inputs/ModelServingConfigServedModelArgs.cs @@ -20,6 +20,9 @@ public InputMap EnvironmentVars set => _environmentVars = value; } + [Input("instanceProfileArn")] + public Input? InstanceProfileArn { get; set; } + /// /// The name of the model in Databricks Model Registry to be served. /// diff --git a/sdk/dotnet/Inputs/ModelServingConfigServedModelGetArgs.cs b/sdk/dotnet/Inputs/ModelServingConfigServedModelGetArgs.cs index 0dad2235..a17f3f66 100644 --- a/sdk/dotnet/Inputs/ModelServingConfigServedModelGetArgs.cs +++ b/sdk/dotnet/Inputs/ModelServingConfigServedModelGetArgs.cs @@ -20,6 +20,9 @@ public InputMap EnvironmentVars set => _environmentVars = value; } + [Input("instanceProfileArn")] + public Input? InstanceProfileArn { get; set; } + /// /// The name of the model in Databricks Model Registry to be served. /// diff --git a/sdk/dotnet/Inputs/PipelineClusterGcpAttributesArgs.cs b/sdk/dotnet/Inputs/PipelineClusterGcpAttributesArgs.cs index 28921f65..25aff59e 100644 --- a/sdk/dotnet/Inputs/PipelineClusterGcpAttributesArgs.cs +++ b/sdk/dotnet/Inputs/PipelineClusterGcpAttributesArgs.cs @@ -18,6 +18,9 @@ public sealed class PipelineClusterGcpAttributesArgs : global::Pulumi.ResourceAr [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? 
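// --- Illustrative usage sketch (not part of the generated diff) ---
// A minimal C# sketch of the new run_job_task support surfaced above as
// JobTaskRunJobTaskArgs: one job triggers another job as a task step.
// Resource names, the downstream job definition, and the parameter values
// are hypothetical placeholders, not taken from this change.
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    // Placeholder for the job being triggered; assumed to be defined with
    // real tasks elsewhere in the program.
    var downstream = new Databricks.Job("downstream", new Databricks.JobArgs());

    var orchestrator = new Databricks.Job("orchestrator", new Databricks.JobArgs
    {
        Tasks =
        {
            new Databricks.Inputs.JobTaskArgs
            {
                TaskKey = "trigger-downstream",
                // New in this change: run another job and pass job parameters.
                RunJobTask = new Databricks.Inputs.JobTaskRunJobTaskArgs
                {
                    JobId = downstream.Id,
                    JobParameters = { { "environment", "staging" } },
                },
                // Also newly documented above: the run condition defaults to ALL_SUCCESS.
                RunIf = "ALL_SUCCESS",
            },
        },
    });
});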
LocalSsdCount { get; set; } + [Input("zoneId")] public Input? ZoneId { get; set; } diff --git a/sdk/dotnet/Inputs/PipelineClusterGcpAttributesGetArgs.cs b/sdk/dotnet/Inputs/PipelineClusterGcpAttributesGetArgs.cs index fbf1ca3c..fb129af1 100644 --- a/sdk/dotnet/Inputs/PipelineClusterGcpAttributesGetArgs.cs +++ b/sdk/dotnet/Inputs/PipelineClusterGcpAttributesGetArgs.cs @@ -18,6 +18,9 @@ public sealed class PipelineClusterGcpAttributesGetArgs : global::Pulumi.Resourc [Input("googleServiceAccount")] public Input? GoogleServiceAccount { get; set; } + [Input("localSsdCount")] + public Input? LocalSsdCount { get; set; } + [Input("zoneId")] public Input? ZoneId { get; set; } diff --git a/sdk/dotnet/Job.cs b/sdk/dotnet/Job.cs index c1894c31..85b7ebb5 100644 --- a/sdk/dotnet/Job.cs +++ b/sdk/dotnet/Job.cs @@ -125,6 +125,9 @@ public partial class Job : global::Pulumi.CustomResource [Output("notificationSettings")] public Output NotificationSettings { get; private set; } = null!; + [Output("parameters")] + public Output> Parameters { get; private set; } = null!; + [Output("pipelineTask")] public Output PipelineTask { get; private set; } = null!; @@ -143,6 +146,9 @@ public partial class Job : global::Pulumi.CustomResource [Output("runAs")] public Output RunAs { get; private set; } = null!; + [Output("runJobTask")] + public Output RunJobTask { get; private set; } = null!; + /// /// (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. /// @@ -352,6 +358,14 @@ public InputList Libraries [Input("notificationSettings")] public Input? NotificationSettings { get; set; } + [Input("parameters")] + private InputList? _parameters; + public InputList Parameters + { + get => _parameters ?? (_parameters = new InputList()); + set => _parameters = value; + } + [Input("pipelineTask")] public Input? PipelineTask { get; set; } @@ -370,6 +384,9 @@ public InputList Libraries [Input("runAs")] public Input? RunAs { get; set; } + [Input("runJobTask")] + public Input? RunJobTask { get; set; } + /// /// (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. /// @@ -545,6 +562,14 @@ public InputList Libraries [Input("notificationSettings")] public Input? NotificationSettings { get; set; } + [Input("parameters")] + private InputList? _parameters; + public InputList Parameters + { + get => _parameters ?? (_parameters = new InputList()); + set => _parameters = value; + } + [Input("pipelineTask")] public Input? PipelineTask { get; set; } @@ -563,6 +588,9 @@ public InputList Libraries [Input("runAs")] public Input? RunAs { get; set; } + [Input("runJobTask")] + public Input? RunJobTask { get; set; } + /// /// (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. /// diff --git a/sdk/dotnet/Outputs/ClusterGcpAttributes.cs b/sdk/dotnet/Outputs/ClusterGcpAttributes.cs index fb6bdce6..7294bc98 100644 --- a/sdk/dotnet/Outputs/ClusterGcpAttributes.cs +++ b/sdk/dotnet/Outputs/ClusterGcpAttributes.cs @@ -16,6 +16,7 @@ public sealed class ClusterGcpAttributes public readonly string? Availability; public readonly int? 
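// --- Illustrative usage sketch (not part of the generated diff) ---
// A minimal C# sketch of the new job-level `parameter` blocks exposed above
// as JobArgs.Parameters. The parameter names and default values below are
// hypothetical, and the input type name JobParameterArgs follows the usual
// generated naming for the JobParameter type added in this change.
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    var job = new Databricks.Job("parameterized", new Databricks.JobArgs
    {
        // Each entry corresponds to one `parameter` block with a name and default.
        Parameters =
        {
            new Databricks.Inputs.JobParameterArgs { Name = "environment", Default = "dev" },
            new Databricks.Inputs.JobParameterArgs { Name = "batch_date", Default = "2023-01-01" },
        },
    });
});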
BootDiskSize; public readonly string? GoogleServiceAccount; + public readonly int? LocalSsdCount; public readonly bool? UsePreemptibleExecutors; public readonly string? ZoneId; @@ -27,6 +28,8 @@ private ClusterGcpAttributes( string? googleServiceAccount, + int? localSsdCount, + bool? usePreemptibleExecutors, string? zoneId) @@ -34,6 +37,7 @@ private ClusterGcpAttributes( Availability = availability; BootDiskSize = bootDiskSize; GoogleServiceAccount = googleServiceAccount; + LocalSsdCount = localSsdCount; UsePreemptibleExecutors = usePreemptibleExecutors; ZoneId = zoneId; } diff --git a/sdk/dotnet/Outputs/GetClusterClusterInfoGcpAttributesResult.cs b/sdk/dotnet/Outputs/GetClusterClusterInfoGcpAttributesResult.cs index 03dbc378..47079645 100644 --- a/sdk/dotnet/Outputs/GetClusterClusterInfoGcpAttributesResult.cs +++ b/sdk/dotnet/Outputs/GetClusterClusterInfoGcpAttributesResult.cs @@ -16,6 +16,7 @@ public sealed class GetClusterClusterInfoGcpAttributesResult public readonly string? Availability; public readonly int? BootDiskSize; public readonly string? GoogleServiceAccount; + public readonly int? LocalSsdCount; public readonly bool? UsePreemptibleExecutors; public readonly string? ZoneId; @@ -27,6 +28,8 @@ private GetClusterClusterInfoGcpAttributesResult( string? googleServiceAccount, + int? localSsdCount, + bool? usePreemptibleExecutors, string? zoneId) @@ -34,6 +37,7 @@ private GetClusterClusterInfoGcpAttributesResult( Availability = availability; BootDiskSize = bootDiskSize; GoogleServiceAccount = googleServiceAccount; + LocalSsdCount = localSsdCount; UsePreemptibleExecutors = usePreemptibleExecutors; ZoneId = zoneId; } diff --git a/sdk/dotnet/Outputs/GetInstancePoolPoolInfoGcpAttributesResult.cs b/sdk/dotnet/Outputs/GetInstancePoolPoolInfoGcpAttributesResult.cs index f8c2cd5d..4ebc2648 100644 --- a/sdk/dotnet/Outputs/GetInstancePoolPoolInfoGcpAttributesResult.cs +++ b/sdk/dotnet/Outputs/GetInstancePoolPoolInfoGcpAttributesResult.cs @@ -14,11 +14,16 @@ namespace Pulumi.Databricks.Outputs public sealed class GetInstancePoolPoolInfoGcpAttributesResult { public readonly string? GcpAvailability; + public readonly int? LocalSsdCount; [OutputConstructor] - private GetInstancePoolPoolInfoGcpAttributesResult(string? gcpAvailability) + private GetInstancePoolPoolInfoGcpAttributesResult( + string? gcpAvailability, + + int? localSsdCount) { GcpAvailability = gcpAvailability; + LocalSsdCount = localSsdCount; } } } diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesResult.cs index 2ba72e7e..346b71ff 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesResult.cs @@ -16,6 +16,7 @@ public sealed class GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesRe public readonly string? Availability; public readonly int? BootDiskSize; public readonly string? GoogleServiceAccount; + public readonly int? LocalSsdCount; public readonly bool? UsePreemptibleExecutors; public readonly string? ZoneId; @@ -27,6 +28,8 @@ private GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesResult( string? googleServiceAccount, + int? localSsdCount, + bool? usePreemptibleExecutors, string? 
zoneId) @@ -34,6 +37,7 @@ private GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesResult( Availability = availability; BootDiskSize = bootDiskSize; GoogleServiceAccount = googleServiceAccount; + LocalSsdCount = localSsdCount; UsePreemptibleExecutors = usePreemptibleExecutors; ZoneId = zoneId; } diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsNewClusterGcpAttributesResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsNewClusterGcpAttributesResult.cs index df86181a..b8093c21 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsNewClusterGcpAttributesResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsNewClusterGcpAttributesResult.cs @@ -16,6 +16,7 @@ public sealed class GetJobJobSettingsSettingsNewClusterGcpAttributesResult public readonly string? Availability; public readonly int? BootDiskSize; public readonly string? GoogleServiceAccount; + public readonly int? LocalSsdCount; public readonly bool? UsePreemptibleExecutors; public readonly string? ZoneId; @@ -27,6 +28,8 @@ private GetJobJobSettingsSettingsNewClusterGcpAttributesResult( string? googleServiceAccount, + int? localSsdCount, + bool? usePreemptibleExecutors, string? zoneId) @@ -34,6 +37,7 @@ private GetJobJobSettingsSettingsNewClusterGcpAttributesResult( Availability = availability; BootDiskSize = bootDiskSize; GoogleServiceAccount = googleServiceAccount; + LocalSsdCount = localSsdCount; UsePreemptibleExecutors = usePreemptibleExecutors; ZoneId = zoneId; } diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsParameterResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsParameterResult.cs new file mode 100644 index 00000000..5b29e1ef --- /dev/null +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsParameterResult.cs @@ -0,0 +1,32 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class GetJobJobSettingsSettingsParameterResult + { + public readonly string? Default; + /// + /// the job name of databricks.Job if the resource was matched by id. + /// + public readonly string? Name; + + [OutputConstructor] + private GetJobJobSettingsSettingsParameterResult( + string? @default, + + string? name) + { + Default = @default; + Name = name; + } + } +} diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsResult.cs index 8669fe40..99dab922 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsResult.cs @@ -33,11 +33,13 @@ public sealed class GetJobJobSettingsSettingsResult public readonly Outputs.GetJobJobSettingsSettingsNewClusterResult? NewCluster; public readonly Outputs.GetJobJobSettingsSettingsNotebookTaskResult? NotebookTask; public readonly Outputs.GetJobJobSettingsSettingsNotificationSettingsResult? NotificationSettings; + public readonly ImmutableArray Parameters; public readonly Outputs.GetJobJobSettingsSettingsPipelineTaskResult? PipelineTask; public readonly Outputs.GetJobJobSettingsSettingsPythonWheelTaskResult? PythonWheelTask; public readonly Outputs.GetJobJobSettingsSettingsQueueResult? Queue; public readonly bool? RetryOnTimeout; public readonly Outputs.GetJobJobSettingsSettingsRunAsResult? 
RunAs; + public readonly Outputs.GetJobJobSettingsSettingsRunJobTaskResult? RunJobTask; public readonly Outputs.GetJobJobSettingsSettingsScheduleResult? Schedule; public readonly Outputs.GetJobJobSettingsSettingsSparkJarTaskResult? SparkJarTask; public readonly Outputs.GetJobJobSettingsSettingsSparkPythonTaskResult? SparkPythonTask; @@ -84,6 +86,8 @@ private GetJobJobSettingsSettingsResult( Outputs.GetJobJobSettingsSettingsNotificationSettingsResult? notificationSettings, + ImmutableArray parameters, + Outputs.GetJobJobSettingsSettingsPipelineTaskResult? pipelineTask, Outputs.GetJobJobSettingsSettingsPythonWheelTaskResult? pythonWheelTask, @@ -94,6 +98,8 @@ private GetJobJobSettingsSettingsResult( Outputs.GetJobJobSettingsSettingsRunAsResult? runAs, + Outputs.GetJobJobSettingsSettingsRunJobTaskResult? runJobTask, + Outputs.GetJobJobSettingsSettingsScheduleResult? schedule, Outputs.GetJobJobSettingsSettingsSparkJarTaskResult? sparkJarTask, @@ -129,11 +135,13 @@ private GetJobJobSettingsSettingsResult( NewCluster = newCluster; NotebookTask = notebookTask; NotificationSettings = notificationSettings; + Parameters = parameters; PipelineTask = pipelineTask; PythonWheelTask = pythonWheelTask; Queue = queue; RetryOnTimeout = retryOnTimeout; RunAs = runAs; + RunJobTask = runJobTask; Schedule = schedule; SparkJarTask = sparkJarTask; SparkPythonTask = sparkPythonTask; diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsRunJobTaskResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsRunJobTaskResult.cs new file mode 100644 index 00000000..dd8a7475 --- /dev/null +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsRunJobTaskResult.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class GetJobJobSettingsSettingsRunJobTaskResult + { + public readonly string JobId; + public readonly ImmutableDictionary? JobParameters; + + [OutputConstructor] + private GetJobJobSettingsSettingsRunJobTaskResult( + string jobId, + + ImmutableDictionary? jobParameters) + { + JobId = jobId; + JobParameters = jobParameters; + } + } +} diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesResult.cs index 5ba55563..9cd71648 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesResult.cs @@ -16,6 +16,7 @@ public sealed class GetJobJobSettingsSettingsTaskNewClusterGcpAttributesResult public readonly string? Availability; public readonly int? BootDiskSize; public readonly string? GoogleServiceAccount; + public readonly int? LocalSsdCount; public readonly bool? UsePreemptibleExecutors; public readonly string? ZoneId; @@ -27,6 +28,8 @@ private GetJobJobSettingsSettingsTaskNewClusterGcpAttributesResult( string? googleServiceAccount, + int? localSsdCount, + bool? usePreemptibleExecutors, string? 
zoneId) @@ -34,6 +37,7 @@ private GetJobJobSettingsSettingsTaskNewClusterGcpAttributesResult( Availability = availability; BootDiskSize = bootDiskSize; GoogleServiceAccount = googleServiceAccount; + LocalSsdCount = localSsdCount; UsePreemptibleExecutors = usePreemptibleExecutors; ZoneId = zoneId; } diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskResult.cs index 51be882d..f2574f8f 100644 --- a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskResult.cs +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskResult.cs @@ -32,6 +32,7 @@ public sealed class GetJobJobSettingsSettingsTaskResult public readonly Outputs.GetJobJobSettingsSettingsTaskPythonWheelTaskResult? PythonWheelTask; public readonly bool RetryOnTimeout; public readonly string? RunIf; + public readonly Outputs.GetJobJobSettingsSettingsTaskRunJobTaskResult? RunJobTask; public readonly Outputs.GetJobJobSettingsSettingsTaskSparkJarTaskResult? SparkJarTask; public readonly Outputs.GetJobJobSettingsSettingsTaskSparkPythonTaskResult? SparkPythonTask; public readonly Outputs.GetJobJobSettingsSettingsTaskSparkSubmitTaskResult? SparkSubmitTask; @@ -79,6 +80,8 @@ private GetJobJobSettingsSettingsTaskResult( string? runIf, + Outputs.GetJobJobSettingsSettingsTaskRunJobTaskResult? runJobTask, + Outputs.GetJobJobSettingsSettingsTaskSparkJarTaskResult? sparkJarTask, Outputs.GetJobJobSettingsSettingsTaskSparkPythonTaskResult? sparkPythonTask, @@ -110,6 +113,7 @@ private GetJobJobSettingsSettingsTaskResult( PythonWheelTask = pythonWheelTask; RetryOnTimeout = retryOnTimeout; RunIf = runIf; + RunJobTask = runJobTask; SparkJarTask = sparkJarTask; SparkPythonTask = sparkPythonTask; SparkSubmitTask = sparkSubmitTask; diff --git a/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskRunJobTaskResult.cs b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskRunJobTaskResult.cs new file mode 100644 index 00000000..433ff661 --- /dev/null +++ b/sdk/dotnet/Outputs/GetJobJobSettingsSettingsTaskRunJobTaskResult.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class GetJobJobSettingsSettingsTaskRunJobTaskResult + { + public readonly string JobId; + public readonly ImmutableDictionary? JobParameters; + + [OutputConstructor] + private GetJobJobSettingsSettingsTaskRunJobTaskResult( + string jobId, + + ImmutableDictionary? jobParameters) + { + JobId = jobId; + JobParameters = jobParameters; + } + } +} diff --git a/sdk/dotnet/Outputs/InstancePoolAzureAttributes.cs b/sdk/dotnet/Outputs/InstancePoolAzureAttributes.cs index 43ea0183..88bfe647 100644 --- a/sdk/dotnet/Outputs/InstancePoolAzureAttributes.cs +++ b/sdk/dotnet/Outputs/InstancePoolAzureAttributes.cs @@ -14,7 +14,7 @@ namespace Pulumi.Databricks.Outputs public sealed class InstancePoolAzureAttributes { /// - /// Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + /// Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. /// public readonly string? 
Availability; /// diff --git a/sdk/dotnet/Outputs/InstancePoolGcpAttributes.cs b/sdk/dotnet/Outputs/InstancePoolGcpAttributes.cs index f67dbe7b..bfce1969 100644 --- a/sdk/dotnet/Outputs/InstancePoolGcpAttributes.cs +++ b/sdk/dotnet/Outputs/InstancePoolGcpAttributes.cs @@ -13,12 +13,23 @@ namespace Pulumi.Databricks.Outputs [OutputType] public sealed class InstancePoolGcpAttributes { + /// + /// Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + /// public readonly string? GcpAvailability; + /// + /// Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + /// + public readonly int? LocalSsdCount; [OutputConstructor] - private InstancePoolGcpAttributes(string? gcpAvailability) + private InstancePoolGcpAttributes( + string? gcpAvailability, + + int? localSsdCount) { GcpAvailability = gcpAvailability; + LocalSsdCount = localSsdCount; } } } diff --git a/sdk/dotnet/Outputs/JobJobClusterNewClusterGcpAttributes.cs b/sdk/dotnet/Outputs/JobJobClusterNewClusterGcpAttributes.cs index b08e23f6..56b514af 100644 --- a/sdk/dotnet/Outputs/JobJobClusterNewClusterGcpAttributes.cs +++ b/sdk/dotnet/Outputs/JobJobClusterNewClusterGcpAttributes.cs @@ -16,6 +16,7 @@ public sealed class JobJobClusterNewClusterGcpAttributes public readonly string? Availability; public readonly int? BootDiskSize; public readonly string? GoogleServiceAccount; + public readonly int? LocalSsdCount; public readonly bool? UsePreemptibleExecutors; public readonly string? ZoneId; @@ -27,6 +28,8 @@ private JobJobClusterNewClusterGcpAttributes( string? googleServiceAccount, + int? localSsdCount, + bool? usePreemptibleExecutors, string? zoneId) @@ -34,6 +37,7 @@ private JobJobClusterNewClusterGcpAttributes( Availability = availability; BootDiskSize = bootDiskSize; GoogleServiceAccount = googleServiceAccount; + LocalSsdCount = localSsdCount; UsePreemptibleExecutors = usePreemptibleExecutors; ZoneId = zoneId; } diff --git a/sdk/dotnet/Outputs/JobNewClusterGcpAttributes.cs b/sdk/dotnet/Outputs/JobNewClusterGcpAttributes.cs index df3be3f3..11fd79e2 100644 --- a/sdk/dotnet/Outputs/JobNewClusterGcpAttributes.cs +++ b/sdk/dotnet/Outputs/JobNewClusterGcpAttributes.cs @@ -16,6 +16,7 @@ public sealed class JobNewClusterGcpAttributes public readonly string? Availability; public readonly int? BootDiskSize; public readonly string? GoogleServiceAccount; + public readonly int? LocalSsdCount; public readonly bool? UsePreemptibleExecutors; public readonly string? ZoneId; @@ -27,6 +28,8 @@ private JobNewClusterGcpAttributes( string? googleServiceAccount, + int? localSsdCount, + bool? usePreemptibleExecutors, string? zoneId) @@ -34,6 +37,7 @@ private JobNewClusterGcpAttributes( Availability = availability; BootDiskSize = bootDiskSize; GoogleServiceAccount = googleServiceAccount; + LocalSsdCount = localSsdCount; UsePreemptibleExecutors = usePreemptibleExecutors; ZoneId = zoneId; } diff --git a/sdk/dotnet/Outputs/JobNotificationSettings.cs b/sdk/dotnet/Outputs/JobNotificationSettings.cs index 79d89fa3..ae8bb58e 100644 --- a/sdk/dotnet/Outputs/JobNotificationSettings.cs +++ b/sdk/dotnet/Outputs/JobNotificationSettings.cs @@ -18,7 +18,7 @@ public sealed class JobNotificationSettings /// public readonly bool? NoAlertForCanceledRuns; /// - /// (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). 
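// --- Illustrative usage sketch (not part of the generated diff) ---
// A minimal C# sketch of the new local_ssd_count setting on GCP attributes,
// shown here on an instance pool alongside gcp_availability (now documented
// above). Pool name, node type, and the autotermination timeout are
// hypothetical placeholders.
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    var pool = new Databricks.InstancePool("gcp-pool", new Databricks.InstancePoolArgs
    {
        InstancePoolName = "gcp-pool",
        NodeTypeId = "n2-highmem-4",
        IdleInstanceAutoterminationMinutes = 15,
        GcpAttributes = new Databricks.Inputs.InstancePoolGcpAttributesArgs
        {
            GcpAvailability = "PREEMPTIBLE_WITH_FALLBACK_GCP",
            // New: number of 375GB local SSDs attached to each node.
            LocalSsdCount = 1,
        },
    });
});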
+ /// (Bool) don't send alert for skipped runs. /// public readonly bool? NoAlertForSkippedRuns; diff --git a/sdk/dotnet/Outputs/JobParameter.cs b/sdk/dotnet/Outputs/JobParameter.cs new file mode 100644 index 00000000..bc812b80 --- /dev/null +++ b/sdk/dotnet/Outputs/JobParameter.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class JobParameter + { + /// + /// Default value of the parameter. + /// + public readonly string? Default; + /// + /// An optional name for the job. The default value is Untitled. + /// + public readonly string? Name; + + [OutputConstructor] + private JobParameter( + string? @default, + + string? name) + { + Default = @default; + Name = name; + } + } +} diff --git a/sdk/dotnet/Outputs/JobRunJobTask.cs b/sdk/dotnet/Outputs/JobRunJobTask.cs new file mode 100644 index 00000000..c75f9aee --- /dev/null +++ b/sdk/dotnet/Outputs/JobRunJobTask.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class JobRunJobTask + { + /// + /// (String) ID of the job + /// + public readonly string JobId; + /// + /// (Map) Job parameters for the task + /// + public readonly ImmutableDictionary? JobParameters; + + [OutputConstructor] + private JobRunJobTask( + string jobId, + + ImmutableDictionary? jobParameters) + { + JobId = jobId; + JobParameters = jobParameters; + } + } +} diff --git a/sdk/dotnet/Outputs/JobTask.cs b/sdk/dotnet/Outputs/JobTask.cs index 3d7cbb84..d5955887 100644 --- a/sdk/dotnet/Outputs/JobTask.cs +++ b/sdk/dotnet/Outputs/JobTask.cs @@ -61,7 +61,11 @@ public sealed class JobTask /// (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. /// public readonly bool? RetryOnTimeout; + /// + /// An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + /// public readonly string? RunIf; + public readonly Outputs.JobTaskRunJobTask? RunJobTask; public readonly Outputs.JobTaskSparkJarTask? SparkJarTask; public readonly Outputs.JobTaskSparkPythonTask? SparkPythonTask; public readonly Outputs.JobTaskSparkSubmitTask? SparkSubmitTask; @@ -116,6 +120,8 @@ private JobTask( string? runIf, + Outputs.JobTaskRunJobTask? runJobTask, + Outputs.JobTaskSparkJarTask? sparkJarTask, Outputs.JobTaskSparkPythonTask? 
sparkPythonTask, @@ -147,6 +153,7 @@ private JobTask( PythonWheelTask = pythonWheelTask; RetryOnTimeout = retryOnTimeout; RunIf = runIf; + RunJobTask = runJobTask; SparkJarTask = sparkJarTask; SparkPythonTask = sparkPythonTask; SparkSubmitTask = sparkSubmitTask; diff --git a/sdk/dotnet/Outputs/JobTaskNewClusterGcpAttributes.cs b/sdk/dotnet/Outputs/JobTaskNewClusterGcpAttributes.cs index 111dd231..bff73e33 100644 --- a/sdk/dotnet/Outputs/JobTaskNewClusterGcpAttributes.cs +++ b/sdk/dotnet/Outputs/JobTaskNewClusterGcpAttributes.cs @@ -16,6 +16,7 @@ public sealed class JobTaskNewClusterGcpAttributes public readonly string? Availability; public readonly int? BootDiskSize; public readonly string? GoogleServiceAccount; + public readonly int? LocalSsdCount; public readonly bool? UsePreemptibleExecutors; public readonly string? ZoneId; @@ -27,6 +28,8 @@ private JobTaskNewClusterGcpAttributes( string? googleServiceAccount, + int? localSsdCount, + bool? usePreemptibleExecutors, string? zoneId) @@ -34,6 +37,7 @@ private JobTaskNewClusterGcpAttributes( Availability = availability; BootDiskSize = bootDiskSize; GoogleServiceAccount = googleServiceAccount; + LocalSsdCount = localSsdCount; UsePreemptibleExecutors = usePreemptibleExecutors; ZoneId = zoneId; } diff --git a/sdk/dotnet/Outputs/JobTaskNotificationSettings.cs b/sdk/dotnet/Outputs/JobTaskNotificationSettings.cs index 9c82673b..fe2cf1a7 100644 --- a/sdk/dotnet/Outputs/JobTaskNotificationSettings.cs +++ b/sdk/dotnet/Outputs/JobTaskNotificationSettings.cs @@ -22,7 +22,7 @@ public sealed class JobTaskNotificationSettings /// public readonly bool? NoAlertForCanceledRuns; /// - /// (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + /// (Bool) don't send alert for skipped runs. /// public readonly bool? NoAlertForSkippedRuns; diff --git a/sdk/dotnet/Outputs/JobTaskRunJobTask.cs b/sdk/dotnet/Outputs/JobTaskRunJobTask.cs new file mode 100644 index 00000000..ac74cb9e --- /dev/null +++ b/sdk/dotnet/Outputs/JobTaskRunJobTask.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Databricks.Outputs +{ + + [OutputType] + public sealed class JobTaskRunJobTask + { + /// + /// (String) ID of the job + /// + public readonly string JobId; + /// + /// (Map) Job parameters for the task + /// + public readonly ImmutableDictionary? JobParameters; + + [OutputConstructor] + private JobTaskRunJobTask( + string jobId, + + ImmutableDictionary? jobParameters) + { + JobId = jobId; + JobParameters = jobParameters; + } + } +} diff --git a/sdk/dotnet/Outputs/ModelServingConfigServedModel.cs b/sdk/dotnet/Outputs/ModelServingConfigServedModel.cs index c97c3d97..24d0c170 100644 --- a/sdk/dotnet/Outputs/ModelServingConfigServedModel.cs +++ b/sdk/dotnet/Outputs/ModelServingConfigServedModel.cs @@ -14,6 +14,7 @@ namespace Pulumi.Databricks.Outputs public sealed class ModelServingConfigServedModel { public readonly ImmutableDictionary? EnvironmentVars; + public readonly string? InstanceProfileArn; /// /// The name of the model in Databricks Model Registry to be served. 
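// --- Illustrative usage sketch (not part of the generated diff) ---
// A minimal C# sketch of the new instance_profile_arn setting on a served
// model. The endpoint name, model name/version, and the ARN are hypothetical
// placeholders for illustration only.
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() =>
{
    var endpoint = new Databricks.ModelServing("endpoint", new Databricks.ModelServingArgs
    {
        Name = "ads-scoring",
        Config = new Databricks.Inputs.ModelServingConfigArgs
        {
            ServedModels =
            {
                new Databricks.Inputs.ModelServingConfigServedModelArgs
                {
                    ModelName = "ads-model",
                    ModelVersion = "2",
                    WorkloadSize = "Small",
                    ScaleToZeroEnabled = true,
                    // New: attach an AWS instance profile to the serving workload.
                    InstanceProfileArn = "arn:aws:iam::123456789012:instance-profile/model-serving",
                },
            },
        },
    });
});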
/// @@ -39,6 +40,8 @@ public sealed class ModelServingConfigServedModel private ModelServingConfigServedModel( ImmutableDictionary? environmentVars, + string? instanceProfileArn, + string modelName, string modelVersion, @@ -50,6 +53,7 @@ private ModelServingConfigServedModel( string workloadSize) { EnvironmentVars = environmentVars; + InstanceProfileArn = instanceProfileArn; ModelName = modelName; ModelVersion = modelVersion; Name = name; diff --git a/sdk/dotnet/Outputs/PipelineClusterGcpAttributes.cs b/sdk/dotnet/Outputs/PipelineClusterGcpAttributes.cs index c1d08439..5c4c426e 100644 --- a/sdk/dotnet/Outputs/PipelineClusterGcpAttributes.cs +++ b/sdk/dotnet/Outputs/PipelineClusterGcpAttributes.cs @@ -15,6 +15,7 @@ public sealed class PipelineClusterGcpAttributes { public readonly string? Availability; public readonly string? GoogleServiceAccount; + public readonly int? LocalSsdCount; public readonly string? ZoneId; [OutputConstructor] @@ -23,10 +24,13 @@ private PipelineClusterGcpAttributes( string? googleServiceAccount, + int? localSsdCount, + string? zoneId) { Availability = availability; GoogleServiceAccount = googleServiceAccount; + LocalSsdCount = localSsdCount; ZoneId = zoneId; } } diff --git a/sdk/go/databricks/job.go b/sdk/go/databricks/job.go index 2a57d02b..f2382b86 100644 --- a/sdk/go/databricks/job.go +++ b/sdk/go/databricks/job.go @@ -71,12 +71,14 @@ type Job struct { NotebookTask JobNotebookTaskPtrOutput `pulumi:"notebookTask"` // An optional block controlling the notification settings on the job level (described below). NotificationSettings JobNotificationSettingsPtrOutput `pulumi:"notificationSettings"` + Parameters JobParameterArrayOutput `pulumi:"parameters"` PipelineTask JobPipelineTaskPtrOutput `pulumi:"pipelineTask"` PythonWheelTask JobPythonWheelTaskPtrOutput `pulumi:"pythonWheelTask"` Queue JobQueuePtrOutput `pulumi:"queue"` // (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. - RetryOnTimeout pulumi.BoolPtrOutput `pulumi:"retryOnTimeout"` - RunAs JobRunAsPtrOutput `pulumi:"runAs"` + RetryOnTimeout pulumi.BoolPtrOutput `pulumi:"retryOnTimeout"` + RunAs JobRunAsPtrOutput `pulumi:"runAs"` + RunJobTask JobRunJobTaskPtrOutput `pulumi:"runJobTask"` // (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. Schedule JobSchedulePtrOutput `pulumi:"schedule"` SparkJarTask JobSparkJarTaskPtrOutput `pulumi:"sparkJarTask"` @@ -171,12 +173,14 @@ type jobState struct { NotebookTask *JobNotebookTask `pulumi:"notebookTask"` // An optional block controlling the notification settings on the job level (described below). NotificationSettings *JobNotificationSettings `pulumi:"notificationSettings"` + Parameters []JobParameter `pulumi:"parameters"` PipelineTask *JobPipelineTask `pulumi:"pipelineTask"` PythonWheelTask *JobPythonWheelTask `pulumi:"pythonWheelTask"` Queue *JobQueue `pulumi:"queue"` // (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. - RetryOnTimeout *bool `pulumi:"retryOnTimeout"` - RunAs *JobRunAs `pulumi:"runAs"` + RetryOnTimeout *bool `pulumi:"retryOnTimeout"` + RunAs *JobRunAs `pulumi:"runAs"` + RunJobTask *JobRunJobTask `pulumi:"runJobTask"` // (List) An optional periodic schedule for this job. 
The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. Schedule *JobSchedule `pulumi:"schedule"` SparkJarTask *JobSparkJarTask `pulumi:"sparkJarTask"` @@ -242,12 +246,14 @@ type JobState struct { NotebookTask JobNotebookTaskPtrInput // An optional block controlling the notification settings on the job level (described below). NotificationSettings JobNotificationSettingsPtrInput + Parameters JobParameterArrayInput PipelineTask JobPipelineTaskPtrInput PythonWheelTask JobPythonWheelTaskPtrInput Queue JobQueuePtrInput // (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. RetryOnTimeout pulumi.BoolPtrInput RunAs JobRunAsPtrInput + RunJobTask JobRunJobTaskPtrInput // (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. Schedule JobSchedulePtrInput SparkJarTask JobSparkJarTaskPtrInput @@ -317,12 +323,14 @@ type jobArgs struct { NotebookTask *JobNotebookTask `pulumi:"notebookTask"` // An optional block controlling the notification settings on the job level (described below). NotificationSettings *JobNotificationSettings `pulumi:"notificationSettings"` + Parameters []JobParameter `pulumi:"parameters"` PipelineTask *JobPipelineTask `pulumi:"pipelineTask"` PythonWheelTask *JobPythonWheelTask `pulumi:"pythonWheelTask"` Queue *JobQueue `pulumi:"queue"` // (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. - RetryOnTimeout *bool `pulumi:"retryOnTimeout"` - RunAs *JobRunAs `pulumi:"runAs"` + RetryOnTimeout *bool `pulumi:"retryOnTimeout"` + RunAs *JobRunAs `pulumi:"runAs"` + RunJobTask *JobRunJobTask `pulumi:"runJobTask"` // (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. Schedule *JobSchedule `pulumi:"schedule"` SparkJarTask *JobSparkJarTask `pulumi:"sparkJarTask"` @@ -387,12 +395,14 @@ type JobArgs struct { NotebookTask JobNotebookTaskPtrInput // An optional block controlling the notification settings on the job level (described below). NotificationSettings JobNotificationSettingsPtrInput + Parameters JobParameterArrayInput PipelineTask JobPipelineTaskPtrInput PythonWheelTask JobPythonWheelTaskPtrInput Queue JobQueuePtrInput // (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. RetryOnTimeout pulumi.BoolPtrInput RunAs JobRunAsPtrInput + RunJobTask JobRunJobTaskPtrInput // (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. 
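// --- Illustrative usage sketch (not part of the generated diff) ---
// A minimal Go sketch of the new Parameters and RunJobTask fields on JobArgs
// shown above. Job names, parameter values, and the downstream job are
// hypothetical placeholders; import paths assume the v1 Go SDK module layout.
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Placeholder for the job being triggered.
		downstream, err := databricks.NewJob(ctx, "downstream", &databricks.JobArgs{})
		if err != nil {
			return err
		}
		_, err = databricks.NewJob(ctx, "orchestrator", &databricks.JobArgs{
			// New: job-level parameter blocks.
			Parameters: databricks.JobParameterArray{
				&databricks.JobParameterArgs{
					Name:    pulumi.String("environment"),
					Default: pulumi.String("dev"),
				},
			},
			// New: trigger another job, passing job parameters.
			RunJobTask: &databricks.JobRunJobTaskArgs{
				JobId:         downstream.ID().ToStringOutput(),
				JobParameters: pulumi.Map{"environment": pulumi.String("staging")},
			},
		})
		return err
	})
}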
Schedule JobSchedulePtrInput SparkJarTask JobSparkJarTaskPtrInput @@ -602,6 +612,10 @@ func (o JobOutput) NotificationSettings() JobNotificationSettingsPtrOutput { return o.ApplyT(func(v *Job) JobNotificationSettingsPtrOutput { return v.NotificationSettings }).(JobNotificationSettingsPtrOutput) } +func (o JobOutput) Parameters() JobParameterArrayOutput { + return o.ApplyT(func(v *Job) JobParameterArrayOutput { return v.Parameters }).(JobParameterArrayOutput) +} + func (o JobOutput) PipelineTask() JobPipelineTaskPtrOutput { return o.ApplyT(func(v *Job) JobPipelineTaskPtrOutput { return v.PipelineTask }).(JobPipelineTaskPtrOutput) } @@ -623,6 +637,10 @@ func (o JobOutput) RunAs() JobRunAsPtrOutput { return o.ApplyT(func(v *Job) JobRunAsPtrOutput { return v.RunAs }).(JobRunAsPtrOutput) } +func (o JobOutput) RunJobTask() JobRunJobTaskPtrOutput { + return o.ApplyT(func(v *Job) JobRunJobTaskPtrOutput { return v.RunJobTask }).(JobRunJobTaskPtrOutput) +} + // (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. func (o JobOutput) Schedule() JobSchedulePtrOutput { return o.ApplyT(func(v *Job) JobSchedulePtrOutput { return v.Schedule }).(JobSchedulePtrOutput) diff --git a/sdk/go/databricks/pulumiTypes.go b/sdk/go/databricks/pulumiTypes.go index 23bf49b9..93b083e4 100644 --- a/sdk/go/databricks/pulumiTypes.go +++ b/sdk/go/databricks/pulumiTypes.go @@ -1647,9 +1647,11 @@ func (o ClusterDockerImageBasicAuthPtrOutput) Username() pulumi.StringPtrOutput } type ClusterGcpAttributes struct { - Availability *string `pulumi:"availability"` - BootDiskSize *int `pulumi:"bootDiskSize"` - GoogleServiceAccount *string `pulumi:"googleServiceAccount"` + Availability *string `pulumi:"availability"` + BootDiskSize *int `pulumi:"bootDiskSize"` + GoogleServiceAccount *string `pulumi:"googleServiceAccount"` + LocalSsdCount *int `pulumi:"localSsdCount"` + // Deprecated: Please use 'availability' instead. UsePreemptibleExecutors *bool `pulumi:"usePreemptibleExecutors"` ZoneId *string `pulumi:"zoneId"` } @@ -1666,9 +1668,11 @@ type ClusterGcpAttributesInput interface { } type ClusterGcpAttributesArgs struct { - Availability pulumi.StringPtrInput `pulumi:"availability"` - BootDiskSize pulumi.IntPtrInput `pulumi:"bootDiskSize"` - GoogleServiceAccount pulumi.StringPtrInput `pulumi:"googleServiceAccount"` + Availability pulumi.StringPtrInput `pulumi:"availability"` + BootDiskSize pulumi.IntPtrInput `pulumi:"bootDiskSize"` + GoogleServiceAccount pulumi.StringPtrInput `pulumi:"googleServiceAccount"` + LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"` + // Deprecated: Please use 'availability' instead. UsePreemptibleExecutors pulumi.BoolPtrInput `pulumi:"usePreemptibleExecutors"` ZoneId pulumi.StringPtrInput `pulumi:"zoneId"` } @@ -1762,6 +1766,11 @@ func (o ClusterGcpAttributesOutput) GoogleServiceAccount() pulumi.StringPtrOutpu return o.ApplyT(func(v ClusterGcpAttributes) *string { return v.GoogleServiceAccount }).(pulumi.StringPtrOutput) } +func (o ClusterGcpAttributesOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v ClusterGcpAttributes) *int { return v.LocalSsdCount }).(pulumi.IntPtrOutput) +} + +// Deprecated: Please use 'availability' instead. 
func (o ClusterGcpAttributesOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v ClusterGcpAttributes) *bool { return v.UsePreemptibleExecutors }).(pulumi.BoolPtrOutput) } @@ -1821,6 +1830,16 @@ func (o ClusterGcpAttributesPtrOutput) GoogleServiceAccount() pulumi.StringPtrOu }).(pulumi.StringPtrOutput) } +func (o ClusterGcpAttributesPtrOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v *ClusterGcpAttributes) *int { + if v == nil { + return nil + } + return v.LocalSsdCount + }).(pulumi.IntPtrOutput) +} + +// Deprecated: Please use 'availability' instead. func (o ClusterGcpAttributesPtrOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v *ClusterGcpAttributes) *bool { if v == nil { @@ -3991,7 +4010,7 @@ func (o InstancePoolAwsAttributesPtrOutput) ZoneId() pulumi.StringPtrOutput { } type InstancePoolAzureAttributes struct { - // Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + // Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. Availability *string `pulumi:"availability"` // The max price for Azure spot instances. Use `-1` to specify the lowest price. SpotBidMaxPrice *float64 `pulumi:"spotBidMaxPrice"` @@ -4009,7 +4028,7 @@ type InstancePoolAzureAttributesInput interface { } type InstancePoolAzureAttributesArgs struct { - // Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + // Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. Availability pulumi.StringPtrInput `pulumi:"availability"` // The max price for Azure spot instances. Use `-1` to specify the lowest price. SpotBidMaxPrice pulumi.Float64PtrInput `pulumi:"spotBidMaxPrice"` @@ -4092,7 +4111,7 @@ func (o InstancePoolAzureAttributesOutput) ToInstancePoolAzureAttributesPtrOutpu }).(InstancePoolAzureAttributesPtrOutput) } -// Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. +// Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. func (o InstancePoolAzureAttributesOutput) Availability() pulumi.StringPtrOutput { return o.ApplyT(func(v InstancePoolAzureAttributes) *string { return v.Availability }).(pulumi.StringPtrOutput) } @@ -4126,7 +4145,7 @@ func (o InstancePoolAzureAttributesPtrOutput) Elem() InstancePoolAzureAttributes }).(InstancePoolAzureAttributesOutput) } -// Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. +// Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. func (o InstancePoolAzureAttributesPtrOutput) Availability() pulumi.StringPtrOutput { return o.ApplyT(func(v *InstancePoolAzureAttributes) *string { if v == nil { @@ -4466,7 +4485,10 @@ func (o InstancePoolDiskSpecDiskTypePtrOutput) EbsVolumeType() pulumi.StringPtrO } type InstancePoolGcpAttributes struct { + // Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. GcpAvailability *string `pulumi:"gcpAvailability"` + // Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. 
+ LocalSsdCount *int `pulumi:"localSsdCount"` } // InstancePoolGcpAttributesInput is an input type that accepts InstancePoolGcpAttributesArgs and InstancePoolGcpAttributesOutput values. @@ -4481,7 +4503,10 @@ type InstancePoolGcpAttributesInput interface { } type InstancePoolGcpAttributesArgs struct { + // Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. GcpAvailability pulumi.StringPtrInput `pulumi:"gcpAvailability"` + // Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"` } func (InstancePoolGcpAttributesArgs) ElementType() reflect.Type { @@ -4561,10 +4586,16 @@ func (o InstancePoolGcpAttributesOutput) ToInstancePoolGcpAttributesPtrOutputWit }).(InstancePoolGcpAttributesPtrOutput) } +// Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. func (o InstancePoolGcpAttributesOutput) GcpAvailability() pulumi.StringPtrOutput { return o.ApplyT(func(v InstancePoolGcpAttributes) *string { return v.GcpAvailability }).(pulumi.StringPtrOutput) } +// Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. +func (o InstancePoolGcpAttributesOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v InstancePoolGcpAttributes) *int { return v.LocalSsdCount }).(pulumi.IntPtrOutput) +} + type InstancePoolGcpAttributesPtrOutput struct{ *pulumi.OutputState } func (InstancePoolGcpAttributesPtrOutput) ElementType() reflect.Type { @@ -4589,6 +4620,7 @@ func (o InstancePoolGcpAttributesPtrOutput) Elem() InstancePoolGcpAttributesOutp }).(InstancePoolGcpAttributesOutput) } +// Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. func (o InstancePoolGcpAttributesPtrOutput) GcpAvailability() pulumi.StringPtrOutput { return o.ApplyT(func(v *InstancePoolGcpAttributes) *string { if v == nil { @@ -4598,6 +4630,16 @@ func (o InstancePoolGcpAttributesPtrOutput) GcpAvailability() pulumi.StringPtrOu }).(pulumi.StringPtrOutput) } +// Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. 
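// --- Illustrative usage sketch (not part of the generated diff) ---
// A minimal Go sketch of LocalSsdCount on a cluster's GCP attributes, using
// Availability rather than the now-deprecated UsePreemptibleExecutors field
// annotated above. The same local_ssd_count field is added to the instance
// pool, pipeline, and job cluster GCP attribute blocks in this change.
// Cluster name, Spark version, and node type are hypothetical placeholders.
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := databricks.NewCluster(ctx, "gcp-cluster", &databricks.ClusterArgs{
			ClusterName:            pulumi.String("gcp-cluster"),
			SparkVersion:           pulumi.String("13.3.x-scala2.12"),
			NodeTypeId:             pulumi.String("n2-highmem-4"),
			NumWorkers:             pulumi.Int(1),
			AutoterminationMinutes: pulumi.Int(20),
			GcpAttributes: &databricks.ClusterGcpAttributesArgs{
				// Prefer Availability over the deprecated UsePreemptibleExecutors.
				Availability: pulumi.String("PREEMPTIBLE_WITH_FALLBACK_GCP"),
				// New: number of 375GB local SSDs attached to each node.
				LocalSsdCount: pulumi.Int(1),
			},
		})
		return err
	})
}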
+func (o InstancePoolGcpAttributesPtrOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v *InstancePoolGcpAttributes) *int { + if v == nil { + return nil + } + return v.LocalSsdCount + }).(pulumi.IntPtrOutput) +} + type InstancePoolInstancePoolFleetAttributes struct { FleetOnDemandOption *InstancePoolInstancePoolFleetAttributesFleetOnDemandOption `pulumi:"fleetOnDemandOption"` FleetSpotOption *InstancePoolInstancePoolFleetAttributesFleetSpotOption `pulumi:"fleetSpotOption"` @@ -9100,6 +9142,7 @@ type JobJobClusterNewClusterGcpAttributes struct { Availability *string `pulumi:"availability"` BootDiskSize *int `pulumi:"bootDiskSize"` GoogleServiceAccount *string `pulumi:"googleServiceAccount"` + LocalSsdCount *int `pulumi:"localSsdCount"` UsePreemptibleExecutors *bool `pulumi:"usePreemptibleExecutors"` ZoneId *string `pulumi:"zoneId"` } @@ -9119,6 +9162,7 @@ type JobJobClusterNewClusterGcpAttributesArgs struct { Availability pulumi.StringPtrInput `pulumi:"availability"` BootDiskSize pulumi.IntPtrInput `pulumi:"bootDiskSize"` GoogleServiceAccount pulumi.StringPtrInput `pulumi:"googleServiceAccount"` + LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"` UsePreemptibleExecutors pulumi.BoolPtrInput `pulumi:"usePreemptibleExecutors"` ZoneId pulumi.StringPtrInput `pulumi:"zoneId"` } @@ -9212,6 +9256,10 @@ func (o JobJobClusterNewClusterGcpAttributesOutput) GoogleServiceAccount() pulum return o.ApplyT(func(v JobJobClusterNewClusterGcpAttributes) *string { return v.GoogleServiceAccount }).(pulumi.StringPtrOutput) } +func (o JobJobClusterNewClusterGcpAttributesOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v JobJobClusterNewClusterGcpAttributes) *int { return v.LocalSsdCount }).(pulumi.IntPtrOutput) +} + func (o JobJobClusterNewClusterGcpAttributesOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v JobJobClusterNewClusterGcpAttributes) *bool { return v.UsePreemptibleExecutors }).(pulumi.BoolPtrOutput) } @@ -9271,6 +9319,15 @@ func (o JobJobClusterNewClusterGcpAttributesPtrOutput) GoogleServiceAccount() pu }).(pulumi.StringPtrOutput) } +func (o JobJobClusterNewClusterGcpAttributesPtrOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v *JobJobClusterNewClusterGcpAttributes) *int { + if v == nil { + return nil + } + return v.LocalSsdCount + }).(pulumi.IntPtrOutput) +} + func (o JobJobClusterNewClusterGcpAttributesPtrOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v *JobJobClusterNewClusterGcpAttributes) *bool { if v == nil { @@ -13446,6 +13503,7 @@ type JobNewClusterGcpAttributes struct { Availability *string `pulumi:"availability"` BootDiskSize *int `pulumi:"bootDiskSize"` GoogleServiceAccount *string `pulumi:"googleServiceAccount"` + LocalSsdCount *int `pulumi:"localSsdCount"` UsePreemptibleExecutors *bool `pulumi:"usePreemptibleExecutors"` ZoneId *string `pulumi:"zoneId"` } @@ -13465,6 +13523,7 @@ type JobNewClusterGcpAttributesArgs struct { Availability pulumi.StringPtrInput `pulumi:"availability"` BootDiskSize pulumi.IntPtrInput `pulumi:"bootDiskSize"` GoogleServiceAccount pulumi.StringPtrInput `pulumi:"googleServiceAccount"` + LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"` UsePreemptibleExecutors pulumi.BoolPtrInput `pulumi:"usePreemptibleExecutors"` ZoneId pulumi.StringPtrInput `pulumi:"zoneId"` } @@ -13558,6 +13617,10 @@ func (o JobNewClusterGcpAttributesOutput) GoogleServiceAccount() pulumi.StringPt return o.ApplyT(func(v JobNewClusterGcpAttributes) *string { 
return v.GoogleServiceAccount }).(pulumi.StringPtrOutput) } +func (o JobNewClusterGcpAttributesOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v JobNewClusterGcpAttributes) *int { return v.LocalSsdCount }).(pulumi.IntPtrOutput) +} + func (o JobNewClusterGcpAttributesOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v JobNewClusterGcpAttributes) *bool { return v.UsePreemptibleExecutors }).(pulumi.BoolPtrOutput) } @@ -13617,6 +13680,15 @@ func (o JobNewClusterGcpAttributesPtrOutput) GoogleServiceAccount() pulumi.Strin }).(pulumi.StringPtrOutput) } +func (o JobNewClusterGcpAttributesPtrOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v *JobNewClusterGcpAttributes) *int { + if v == nil { + return nil + } + return v.LocalSsdCount + }).(pulumi.IntPtrOutput) +} + func (o JobNewClusterGcpAttributesPtrOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v *JobNewClusterGcpAttributes) *bool { if v == nil { @@ -15295,7 +15367,7 @@ func (o JobNotebookTaskPtrOutput) Source() pulumi.StringPtrOutput { type JobNotificationSettings struct { // (Bool) don't send alert for cancelled runs. NoAlertForCanceledRuns *bool `pulumi:"noAlertForCanceledRuns"` - // (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). + // (Bool) don't send alert for skipped runs. NoAlertForSkippedRuns *bool `pulumi:"noAlertForSkippedRuns"` } @@ -15313,7 +15385,7 @@ type JobNotificationSettingsInput interface { type JobNotificationSettingsArgs struct { // (Bool) don't send alert for cancelled runs. NoAlertForCanceledRuns pulumi.BoolPtrInput `pulumi:"noAlertForCanceledRuns"` - // (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). + // (Bool) don't send alert for skipped runs. NoAlertForSkippedRuns pulumi.BoolPtrInput `pulumi:"noAlertForSkippedRuns"` } @@ -15399,7 +15471,7 @@ func (o JobNotificationSettingsOutput) NoAlertForCanceledRuns() pulumi.BoolPtrOu return o.ApplyT(func(v JobNotificationSettings) *bool { return v.NoAlertForCanceledRuns }).(pulumi.BoolPtrOutput) } -// (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). +// (Bool) don't send alert for skipped runs. func (o JobNotificationSettingsOutput) NoAlertForSkippedRuns() pulumi.BoolPtrOutput { return o.ApplyT(func(v JobNotificationSettings) *bool { return v.NoAlertForSkippedRuns }).(pulumi.BoolPtrOutput) } @@ -15438,7 +15510,7 @@ func (o JobNotificationSettingsPtrOutput) NoAlertForCanceledRuns() pulumi.BoolPt }).(pulumi.BoolPtrOutput) } -// (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). +// (Bool) don't send alert for skipped runs. func (o JobNotificationSettingsPtrOutput) NoAlertForSkippedRuns() pulumi.BoolPtrOutput { return o.ApplyT(func(v *JobNotificationSettings) *bool { if v == nil { @@ -15448,6 +15520,112 @@ func (o JobNotificationSettingsPtrOutput) NoAlertForSkippedRuns() pulumi.BoolPtr }).(pulumi.BoolPtrOutput) } +type JobParameter struct { + // Default value of the parameter. + Default *string `pulumi:"default"` + // An optional name for the job. The default value is Untitled. 
+ Name *string `pulumi:"name"` +} + +// JobParameterInput is an input type that accepts JobParameterArgs and JobParameterOutput values. +// You can construct a concrete instance of `JobParameterInput` via: +// +// JobParameterArgs{...} +type JobParameterInput interface { + pulumi.Input + + ToJobParameterOutput() JobParameterOutput + ToJobParameterOutputWithContext(context.Context) JobParameterOutput +} + +type JobParameterArgs struct { + // Default value of the parameter. + Default pulumi.StringPtrInput `pulumi:"default"` + // An optional name for the job. The default value is Untitled. + Name pulumi.StringPtrInput `pulumi:"name"` +} + +func (JobParameterArgs) ElementType() reflect.Type { + return reflect.TypeOf((*JobParameter)(nil)).Elem() +} + +func (i JobParameterArgs) ToJobParameterOutput() JobParameterOutput { + return i.ToJobParameterOutputWithContext(context.Background()) +} + +func (i JobParameterArgs) ToJobParameterOutputWithContext(ctx context.Context) JobParameterOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobParameterOutput) +} + +// JobParameterArrayInput is an input type that accepts JobParameterArray and JobParameterArrayOutput values. +// You can construct a concrete instance of `JobParameterArrayInput` via: +// +// JobParameterArray{ JobParameterArgs{...} } +type JobParameterArrayInput interface { + pulumi.Input + + ToJobParameterArrayOutput() JobParameterArrayOutput + ToJobParameterArrayOutputWithContext(context.Context) JobParameterArrayOutput +} + +type JobParameterArray []JobParameterInput + +func (JobParameterArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]JobParameter)(nil)).Elem() +} + +func (i JobParameterArray) ToJobParameterArrayOutput() JobParameterArrayOutput { + return i.ToJobParameterArrayOutputWithContext(context.Background()) +} + +func (i JobParameterArray) ToJobParameterArrayOutputWithContext(ctx context.Context) JobParameterArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobParameterArrayOutput) +} + +type JobParameterOutput struct{ *pulumi.OutputState } + +func (JobParameterOutput) ElementType() reflect.Type { + return reflect.TypeOf((*JobParameter)(nil)).Elem() +} + +func (o JobParameterOutput) ToJobParameterOutput() JobParameterOutput { + return o +} + +func (o JobParameterOutput) ToJobParameterOutputWithContext(ctx context.Context) JobParameterOutput { + return o +} + +// Default value of the parameter. +func (o JobParameterOutput) Default() pulumi.StringPtrOutput { + return o.ApplyT(func(v JobParameter) *string { return v.Default }).(pulumi.StringPtrOutput) +} + +// An optional name for the job. The default value is Untitled. +func (o JobParameterOutput) Name() pulumi.StringPtrOutput { + return o.ApplyT(func(v JobParameter) *string { return v.Name }).(pulumi.StringPtrOutput) +} + +type JobParameterArrayOutput struct{ *pulumi.OutputState } + +func (JobParameterArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]JobParameter)(nil)).Elem() +} + +func (o JobParameterArrayOutput) ToJobParameterArrayOutput() JobParameterArrayOutput { + return o +} + +func (o JobParameterArrayOutput) ToJobParameterArrayOutputWithContext(ctx context.Context) JobParameterArrayOutput { + return o +} + +func (o JobParameterArrayOutput) Index(i pulumi.IntInput) JobParameterOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) JobParameter { + return vs[0].([]JobParameter)[vs[1].(int)] + }).(JobParameterOutput) +} + type JobPipelineTask struct { // (Bool) Specifies if there should be full refresh of the pipeline. 
// @@ -16186,6 +16364,162 @@ func (o JobRunAsPtrOutput) UserName() pulumi.StringPtrOutput { }).(pulumi.StringPtrOutput) } +type JobRunJobTask struct { + // (String) ID of the job + JobId string `pulumi:"jobId"` + // (Map) Job parameters for the task + JobParameters map[string]interface{} `pulumi:"jobParameters"` +} + +// JobRunJobTaskInput is an input type that accepts JobRunJobTaskArgs and JobRunJobTaskOutput values. +// You can construct a concrete instance of `JobRunJobTaskInput` via: +// +// JobRunJobTaskArgs{...} +type JobRunJobTaskInput interface { + pulumi.Input + + ToJobRunJobTaskOutput() JobRunJobTaskOutput + ToJobRunJobTaskOutputWithContext(context.Context) JobRunJobTaskOutput +} + +type JobRunJobTaskArgs struct { + // (String) ID of the job + JobId pulumi.StringInput `pulumi:"jobId"` + // (Map) Job parameters for the task + JobParameters pulumi.MapInput `pulumi:"jobParameters"` +} + +func (JobRunJobTaskArgs) ElementType() reflect.Type { + return reflect.TypeOf((*JobRunJobTask)(nil)).Elem() +} + +func (i JobRunJobTaskArgs) ToJobRunJobTaskOutput() JobRunJobTaskOutput { + return i.ToJobRunJobTaskOutputWithContext(context.Background()) +} + +func (i JobRunJobTaskArgs) ToJobRunJobTaskOutputWithContext(ctx context.Context) JobRunJobTaskOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobRunJobTaskOutput) +} + +func (i JobRunJobTaskArgs) ToJobRunJobTaskPtrOutput() JobRunJobTaskPtrOutput { + return i.ToJobRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (i JobRunJobTaskArgs) ToJobRunJobTaskPtrOutputWithContext(ctx context.Context) JobRunJobTaskPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobRunJobTaskOutput).ToJobRunJobTaskPtrOutputWithContext(ctx) +} + +// JobRunJobTaskPtrInput is an input type that accepts JobRunJobTaskArgs, JobRunJobTaskPtr and JobRunJobTaskPtrOutput values. 
+// You can construct a concrete instance of `JobRunJobTaskPtrInput` via: +// +// JobRunJobTaskArgs{...} +// +// or: +// +// nil +type JobRunJobTaskPtrInput interface { + pulumi.Input + + ToJobRunJobTaskPtrOutput() JobRunJobTaskPtrOutput + ToJobRunJobTaskPtrOutputWithContext(context.Context) JobRunJobTaskPtrOutput +} + +type jobRunJobTaskPtrType JobRunJobTaskArgs + +func JobRunJobTaskPtr(v *JobRunJobTaskArgs) JobRunJobTaskPtrInput { + return (*jobRunJobTaskPtrType)(v) +} + +func (*jobRunJobTaskPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**JobRunJobTask)(nil)).Elem() +} + +func (i *jobRunJobTaskPtrType) ToJobRunJobTaskPtrOutput() JobRunJobTaskPtrOutput { + return i.ToJobRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (i *jobRunJobTaskPtrType) ToJobRunJobTaskPtrOutputWithContext(ctx context.Context) JobRunJobTaskPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobRunJobTaskPtrOutput) +} + +type JobRunJobTaskOutput struct{ *pulumi.OutputState } + +func (JobRunJobTaskOutput) ElementType() reflect.Type { + return reflect.TypeOf((*JobRunJobTask)(nil)).Elem() +} + +func (o JobRunJobTaskOutput) ToJobRunJobTaskOutput() JobRunJobTaskOutput { + return o +} + +func (o JobRunJobTaskOutput) ToJobRunJobTaskOutputWithContext(ctx context.Context) JobRunJobTaskOutput { + return o +} + +func (o JobRunJobTaskOutput) ToJobRunJobTaskPtrOutput() JobRunJobTaskPtrOutput { + return o.ToJobRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (o JobRunJobTaskOutput) ToJobRunJobTaskPtrOutputWithContext(ctx context.Context) JobRunJobTaskPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v JobRunJobTask) *JobRunJobTask { + return &v + }).(JobRunJobTaskPtrOutput) +} + +// (String) ID of the job +func (o JobRunJobTaskOutput) JobId() pulumi.StringOutput { + return o.ApplyT(func(v JobRunJobTask) string { return v.JobId }).(pulumi.StringOutput) +} + +// (Map) Job parameters for the task +func (o JobRunJobTaskOutput) JobParameters() pulumi.MapOutput { + return o.ApplyT(func(v JobRunJobTask) map[string]interface{} { return v.JobParameters }).(pulumi.MapOutput) +} + +type JobRunJobTaskPtrOutput struct{ *pulumi.OutputState } + +func (JobRunJobTaskPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**JobRunJobTask)(nil)).Elem() +} + +func (o JobRunJobTaskPtrOutput) ToJobRunJobTaskPtrOutput() JobRunJobTaskPtrOutput { + return o +} + +func (o JobRunJobTaskPtrOutput) ToJobRunJobTaskPtrOutputWithContext(ctx context.Context) JobRunJobTaskPtrOutput { + return o +} + +func (o JobRunJobTaskPtrOutput) Elem() JobRunJobTaskOutput { + return o.ApplyT(func(v *JobRunJobTask) JobRunJobTask { + if v != nil { + return *v + } + var ret JobRunJobTask + return ret + }).(JobRunJobTaskOutput) +} + +// (String) ID of the job +func (o JobRunJobTaskPtrOutput) JobId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *JobRunJobTask) *string { + if v == nil { + return nil + } + return &v.JobId + }).(pulumi.StringPtrOutput) +} + +// (Map) Job parameters for the task +func (o JobRunJobTaskPtrOutput) JobParameters() pulumi.MapOutput { + return o.ApplyT(func(v *JobRunJobTask) map[string]interface{} { + if v == nil { + return nil + } + return v.JobParameters + }).(pulumi.MapOutput) +} + type JobSchedule struct { // Indicate whether this schedule is paused or not. Either `PAUSED` or `UNPAUSED`. When the `pauseStatus` field is omitted and a schedule is provided, the server will default to using `UNPAUSED` as a value for `pauseStatus`. 
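// ----------------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the generated diff. It constructs the
// new run_job_task block; the job ID and parameter values are placeholders and the
// usual SDK imports are assumed. The value could be assigned to the RunJobTask field of
// databricks.JobArgs (legacy single-task syntax) or, more commonly, set on an
// individual task as shown in the later JobTaskRunJobTask example.
func exampleRunJobTask() *databricks.JobRunJobTaskArgs {
	return &databricks.JobRunJobTaskArgs{
		JobId:         pulumi.String("123456"),
		JobParameters: pulumi.Map{"environment": pulumi.String("prod")},
	}
}
// ----------------------------------------------------------------------------------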
PauseStatus *string `pulumi:"pauseStatus"` @@ -16872,8 +17206,10 @@ type JobTask struct { PipelineTask *JobTaskPipelineTask `pulumi:"pipelineTask"` PythonWheelTask *JobTaskPythonWheelTask `pulumi:"pythonWheelTask"` // (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. - RetryOnTimeout *bool `pulumi:"retryOnTimeout"` + RetryOnTimeout *bool `pulumi:"retryOnTimeout"` + // An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. RunIf *string `pulumi:"runIf"` + RunJobTask *JobTaskRunJobTask `pulumi:"runJobTask"` SparkJarTask *JobTaskSparkJarTask `pulumi:"sparkJarTask"` SparkPythonTask *JobTaskSparkPythonTask `pulumi:"sparkPythonTask"` SparkSubmitTask *JobTaskSparkSubmitTask `pulumi:"sparkSubmitTask"` @@ -16924,8 +17260,10 @@ type JobTaskArgs struct { PipelineTask JobTaskPipelineTaskPtrInput `pulumi:"pipelineTask"` PythonWheelTask JobTaskPythonWheelTaskPtrInput `pulumi:"pythonWheelTask"` // (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. - RetryOnTimeout pulumi.BoolPtrInput `pulumi:"retryOnTimeout"` + RetryOnTimeout pulumi.BoolPtrInput `pulumi:"retryOnTimeout"` + // An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. RunIf pulumi.StringPtrInput `pulumi:"runIf"` + RunJobTask JobTaskRunJobTaskPtrInput `pulumi:"runJobTask"` SparkJarTask JobTaskSparkJarTaskPtrInput `pulumi:"sparkJarTask"` SparkPythonTask JobTaskSparkPythonTaskPtrInput `pulumi:"sparkPythonTask"` SparkSubmitTask JobTaskSparkSubmitTaskPtrInput `pulumi:"sparkSubmitTask"` @@ -17070,10 +17408,15 @@ func (o JobTaskOutput) RetryOnTimeout() pulumi.BoolPtrOutput { return o.ApplyT(func(v JobTask) *bool { return v.RetryOnTimeout }).(pulumi.BoolPtrOutput) } +// An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. 
func (o JobTaskOutput) RunIf() pulumi.StringPtrOutput { return o.ApplyT(func(v JobTask) *string { return v.RunIf }).(pulumi.StringPtrOutput) } +func (o JobTaskOutput) RunJobTask() JobTaskRunJobTaskPtrOutput { + return o.ApplyT(func(v JobTask) *JobTaskRunJobTask { return v.RunJobTask }).(JobTaskRunJobTaskPtrOutput) +} + func (o JobTaskOutput) SparkJarTask() JobTaskSparkJarTaskPtrOutput { return o.ApplyT(func(v JobTask) *JobTaskSparkJarTask { return v.SparkJarTask }).(JobTaskSparkJarTaskPtrOutput) } @@ -20786,6 +21129,7 @@ type JobTaskNewClusterGcpAttributes struct { Availability *string `pulumi:"availability"` BootDiskSize *int `pulumi:"bootDiskSize"` GoogleServiceAccount *string `pulumi:"googleServiceAccount"` + LocalSsdCount *int `pulumi:"localSsdCount"` UsePreemptibleExecutors *bool `pulumi:"usePreemptibleExecutors"` ZoneId *string `pulumi:"zoneId"` } @@ -20805,6 +21149,7 @@ type JobTaskNewClusterGcpAttributesArgs struct { Availability pulumi.StringPtrInput `pulumi:"availability"` BootDiskSize pulumi.IntPtrInput `pulumi:"bootDiskSize"` GoogleServiceAccount pulumi.StringPtrInput `pulumi:"googleServiceAccount"` + LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"` UsePreemptibleExecutors pulumi.BoolPtrInput `pulumi:"usePreemptibleExecutors"` ZoneId pulumi.StringPtrInput `pulumi:"zoneId"` } @@ -20898,6 +21243,10 @@ func (o JobTaskNewClusterGcpAttributesOutput) GoogleServiceAccount() pulumi.Stri return o.ApplyT(func(v JobTaskNewClusterGcpAttributes) *string { return v.GoogleServiceAccount }).(pulumi.StringPtrOutput) } +func (o JobTaskNewClusterGcpAttributesOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v JobTaskNewClusterGcpAttributes) *int { return v.LocalSsdCount }).(pulumi.IntPtrOutput) +} + func (o JobTaskNewClusterGcpAttributesOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v JobTaskNewClusterGcpAttributes) *bool { return v.UsePreemptibleExecutors }).(pulumi.BoolPtrOutput) } @@ -20957,6 +21306,15 @@ func (o JobTaskNewClusterGcpAttributesPtrOutput) GoogleServiceAccount() pulumi.S }).(pulumi.StringPtrOutput) } +func (o JobTaskNewClusterGcpAttributesPtrOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v *JobTaskNewClusterGcpAttributes) *int { + if v == nil { + return nil + } + return v.LocalSsdCount + }).(pulumi.IntPtrOutput) +} + func (o JobTaskNewClusterGcpAttributesPtrOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v *JobTaskNewClusterGcpAttributes) *bool { if v == nil { @@ -22637,7 +22995,7 @@ type JobTaskNotificationSettings struct { AlertOnLastAttempt *bool `pulumi:"alertOnLastAttempt"` // (Bool) don't send alert for cancelled runs. NoAlertForCanceledRuns *bool `pulumi:"noAlertForCanceledRuns"` - // (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). + // (Bool) don't send alert for skipped runs. NoAlertForSkippedRuns *bool `pulumi:"noAlertForSkippedRuns"` } @@ -22657,7 +23015,7 @@ type JobTaskNotificationSettingsArgs struct { AlertOnLastAttempt pulumi.BoolPtrInput `pulumi:"alertOnLastAttempt"` // (Bool) don't send alert for cancelled runs. NoAlertForCanceledRuns pulumi.BoolPtrInput `pulumi:"noAlertForCanceledRuns"` - // (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). + // (Bool) don't send alert for skipped runs. 
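// ----------------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the generated diff. The new
// localSsdCount field is an optional int input on the GCP attribute types; the values
// below are placeholders and the usual SDK imports are assumed. The same pattern
// applies to the cluster, job-cluster and pipeline variants of these types.
func exampleTaskClusterGcpAttributes() *databricks.JobTaskNewClusterGcpAttributesArgs {
	return &databricks.JobTaskNewClusterGcpAttributesArgs{
		LocalSsdCount:           pulumi.Int(1),
		UsePreemptibleExecutors: pulumi.Bool(false),
		ZoneId:                  pulumi.String("us-central1-a"),
	}
}
// The returned value would be set as the GcpAttributes field of a task's new_cluster
// definition.
// ----------------------------------------------------------------------------------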
NoAlertForSkippedRuns pulumi.BoolPtrInput `pulumi:"noAlertForSkippedRuns"` } @@ -22748,7 +23106,7 @@ func (o JobTaskNotificationSettingsOutput) NoAlertForCanceledRuns() pulumi.BoolP return o.ApplyT(func(v JobTaskNotificationSettings) *bool { return v.NoAlertForCanceledRuns }).(pulumi.BoolPtrOutput) } -// (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). +// (Bool) don't send alert for skipped runs. func (o JobTaskNotificationSettingsOutput) NoAlertForSkippedRuns() pulumi.BoolPtrOutput { return o.ApplyT(func(v JobTaskNotificationSettings) *bool { return v.NoAlertForSkippedRuns }).(pulumi.BoolPtrOutput) } @@ -22797,7 +23155,7 @@ func (o JobTaskNotificationSettingsPtrOutput) NoAlertForCanceledRuns() pulumi.Bo }).(pulumi.BoolPtrOutput) } -// (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). +// (Bool) don't send alert for skipped runs. func (o JobTaskNotificationSettingsPtrOutput) NoAlertForSkippedRuns() pulumi.BoolPtrOutput { return o.ApplyT(func(v *JobTaskNotificationSettings) *bool { if v == nil { @@ -23165,6 +23523,162 @@ func (o JobTaskPythonWheelTaskPtrOutput) Parameters() pulumi.StringArrayOutput { }).(pulumi.StringArrayOutput) } +type JobTaskRunJobTask struct { + // (String) ID of the job + JobId string `pulumi:"jobId"` + // (Map) Job parameters for the task + JobParameters map[string]interface{} `pulumi:"jobParameters"` +} + +// JobTaskRunJobTaskInput is an input type that accepts JobTaskRunJobTaskArgs and JobTaskRunJobTaskOutput values. +// You can construct a concrete instance of `JobTaskRunJobTaskInput` via: +// +// JobTaskRunJobTaskArgs{...} +type JobTaskRunJobTaskInput interface { + pulumi.Input + + ToJobTaskRunJobTaskOutput() JobTaskRunJobTaskOutput + ToJobTaskRunJobTaskOutputWithContext(context.Context) JobTaskRunJobTaskOutput +} + +type JobTaskRunJobTaskArgs struct { + // (String) ID of the job + JobId pulumi.StringInput `pulumi:"jobId"` + // (Map) Job parameters for the task + JobParameters pulumi.MapInput `pulumi:"jobParameters"` +} + +func (JobTaskRunJobTaskArgs) ElementType() reflect.Type { + return reflect.TypeOf((*JobTaskRunJobTask)(nil)).Elem() +} + +func (i JobTaskRunJobTaskArgs) ToJobTaskRunJobTaskOutput() JobTaskRunJobTaskOutput { + return i.ToJobTaskRunJobTaskOutputWithContext(context.Background()) +} + +func (i JobTaskRunJobTaskArgs) ToJobTaskRunJobTaskOutputWithContext(ctx context.Context) JobTaskRunJobTaskOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobTaskRunJobTaskOutput) +} + +func (i JobTaskRunJobTaskArgs) ToJobTaskRunJobTaskPtrOutput() JobTaskRunJobTaskPtrOutput { + return i.ToJobTaskRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (i JobTaskRunJobTaskArgs) ToJobTaskRunJobTaskPtrOutputWithContext(ctx context.Context) JobTaskRunJobTaskPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobTaskRunJobTaskOutput).ToJobTaskRunJobTaskPtrOutputWithContext(ctx) +} + +// JobTaskRunJobTaskPtrInput is an input type that accepts JobTaskRunJobTaskArgs, JobTaskRunJobTaskPtr and JobTaskRunJobTaskPtrOutput values. 
+// You can construct a concrete instance of `JobTaskRunJobTaskPtrInput` via: +// +// JobTaskRunJobTaskArgs{...} +// +// or: +// +// nil +type JobTaskRunJobTaskPtrInput interface { + pulumi.Input + + ToJobTaskRunJobTaskPtrOutput() JobTaskRunJobTaskPtrOutput + ToJobTaskRunJobTaskPtrOutputWithContext(context.Context) JobTaskRunJobTaskPtrOutput +} + +type jobTaskRunJobTaskPtrType JobTaskRunJobTaskArgs + +func JobTaskRunJobTaskPtr(v *JobTaskRunJobTaskArgs) JobTaskRunJobTaskPtrInput { + return (*jobTaskRunJobTaskPtrType)(v) +} + +func (*jobTaskRunJobTaskPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**JobTaskRunJobTask)(nil)).Elem() +} + +func (i *jobTaskRunJobTaskPtrType) ToJobTaskRunJobTaskPtrOutput() JobTaskRunJobTaskPtrOutput { + return i.ToJobTaskRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (i *jobTaskRunJobTaskPtrType) ToJobTaskRunJobTaskPtrOutputWithContext(ctx context.Context) JobTaskRunJobTaskPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(JobTaskRunJobTaskPtrOutput) +} + +type JobTaskRunJobTaskOutput struct{ *pulumi.OutputState } + +func (JobTaskRunJobTaskOutput) ElementType() reflect.Type { + return reflect.TypeOf((*JobTaskRunJobTask)(nil)).Elem() +} + +func (o JobTaskRunJobTaskOutput) ToJobTaskRunJobTaskOutput() JobTaskRunJobTaskOutput { + return o +} + +func (o JobTaskRunJobTaskOutput) ToJobTaskRunJobTaskOutputWithContext(ctx context.Context) JobTaskRunJobTaskOutput { + return o +} + +func (o JobTaskRunJobTaskOutput) ToJobTaskRunJobTaskPtrOutput() JobTaskRunJobTaskPtrOutput { + return o.ToJobTaskRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (o JobTaskRunJobTaskOutput) ToJobTaskRunJobTaskPtrOutputWithContext(ctx context.Context) JobTaskRunJobTaskPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v JobTaskRunJobTask) *JobTaskRunJobTask { + return &v + }).(JobTaskRunJobTaskPtrOutput) +} + +// (String) ID of the job +func (o JobTaskRunJobTaskOutput) JobId() pulumi.StringOutput { + return o.ApplyT(func(v JobTaskRunJobTask) string { return v.JobId }).(pulumi.StringOutput) +} + +// (Map) Job parameters for the task +func (o JobTaskRunJobTaskOutput) JobParameters() pulumi.MapOutput { + return o.ApplyT(func(v JobTaskRunJobTask) map[string]interface{} { return v.JobParameters }).(pulumi.MapOutput) +} + +type JobTaskRunJobTaskPtrOutput struct{ *pulumi.OutputState } + +func (JobTaskRunJobTaskPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**JobTaskRunJobTask)(nil)).Elem() +} + +func (o JobTaskRunJobTaskPtrOutput) ToJobTaskRunJobTaskPtrOutput() JobTaskRunJobTaskPtrOutput { + return o +} + +func (o JobTaskRunJobTaskPtrOutput) ToJobTaskRunJobTaskPtrOutputWithContext(ctx context.Context) JobTaskRunJobTaskPtrOutput { + return o +} + +func (o JobTaskRunJobTaskPtrOutput) Elem() JobTaskRunJobTaskOutput { + return o.ApplyT(func(v *JobTaskRunJobTask) JobTaskRunJobTask { + if v != nil { + return *v + } + var ret JobTaskRunJobTask + return ret + }).(JobTaskRunJobTaskOutput) +} + +// (String) ID of the job +func (o JobTaskRunJobTaskPtrOutput) JobId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *JobTaskRunJobTask) *string { + if v == nil { + return nil + } + return &v.JobId + }).(pulumi.StringPtrOutput) +} + +// (Map) Job parameters for the task +func (o JobTaskRunJobTaskPtrOutput) JobParameters() pulumi.MapOutput { + return o.ApplyT(func(v *JobTaskRunJobTask) map[string]interface{} { + if v == nil { + return nil + } + return v.JobParameters + }).(pulumi.MapOutput) +} + type JobTaskSparkJarTask struct { 
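// ----------------------------------------------------------------------------------
// Editor's note: illustrative end-to-end sketch, not part of the generated diff. It
// wires the new JobTaskRunJobTask type into a multi-task job. The resource name, task
// key, referenced job ID and parameters are placeholders; other job fields are omitted
// for brevity, and the import path should match the SDK major version in use.
package main

import (
	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// A task that triggers another Databricks job once its dependencies succeed.
		_, err := databricks.NewJob(ctx, "orchestrator", &databricks.JobArgs{
			Tasks: databricks.JobTaskArray{
				&databricks.JobTaskArgs{
					TaskKey: pulumi.String("trigger-downstream"),
					RunIf:   pulumi.String("ALL_SUCCESS"),
					RunJobTask: &databricks.JobTaskRunJobTaskArgs{
						JobId:         pulumi.String("123456"),
						JobParameters: pulumi.Map{"environment": pulumi.String("prod")},
					},
				},
			},
		})
		return err
	})
}
// ----------------------------------------------------------------------------------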
JarUri *string `pulumi:"jarUri"` // The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library. The code should use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job will fail. @@ -27850,7 +28364,8 @@ func (o ModelServingConfigPtrOutput) TrafficConfig() ModelServingConfigTrafficCo } type ModelServingConfigServedModel struct { - EnvironmentVars map[string]interface{} `pulumi:"environmentVars"` + EnvironmentVars map[string]interface{} `pulumi:"environmentVars"` + InstanceProfileArn *string `pulumi:"instanceProfileArn"` // The name of the model in Databricks Model Registry to be served. ModelName string `pulumi:"modelName"` // The version of the model in Databricks Model Registry to be served. @@ -27875,7 +28390,8 @@ type ModelServingConfigServedModelInput interface { } type ModelServingConfigServedModelArgs struct { - EnvironmentVars pulumi.MapInput `pulumi:"environmentVars"` + EnvironmentVars pulumi.MapInput `pulumi:"environmentVars"` + InstanceProfileArn pulumi.StringPtrInput `pulumi:"instanceProfileArn"` // The name of the model in Databricks Model Registry to be served. ModelName pulumi.StringInput `pulumi:"modelName"` // The version of the model in Databricks Model Registry to be served. @@ -27943,6 +28459,10 @@ func (o ModelServingConfigServedModelOutput) EnvironmentVars() pulumi.MapOutput return o.ApplyT(func(v ModelServingConfigServedModel) map[string]interface{} { return v.EnvironmentVars }).(pulumi.MapOutput) } +func (o ModelServingConfigServedModelOutput) InstanceProfileArn() pulumi.StringPtrOutput { + return o.ApplyT(func(v ModelServingConfigServedModel) *string { return v.InstanceProfileArn }).(pulumi.StringPtrOutput) +} + // The name of the model in Databricks Model Registry to be served. 
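// ----------------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the generated diff. The new
// instanceProfileArn field lets a served model assume an AWS instance profile. All
// values are placeholders, remaining model-serving configuration fields are omitted,
// and the usual SDK imports are assumed.
func exampleServedModel() *databricks.ModelServingConfigServedModelArgs {
	return &databricks.ModelServingConfigServedModelArgs{
		ModelName:          pulumi.String("ads-model"),
		ModelVersion:       pulumi.String("2"),
		ScaleToZeroEnabled: pulumi.Bool(true),
		InstanceProfileArn: pulumi.String("arn:aws:iam::123456789012:instance-profile/model-serving"),
	}
}
// ----------------------------------------------------------------------------------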
func (o ModelServingConfigServedModelOutput) ModelName() pulumi.StringOutput { return o.ApplyT(func(v ModelServingConfigServedModel) string { return v.ModelName }).(pulumi.StringOutput) @@ -32534,6 +33054,7 @@ func (o PipelineClusterClusterLogConfS3PtrOutput) Region() pulumi.StringPtrOutpu type PipelineClusterGcpAttributes struct { Availability *string `pulumi:"availability"` GoogleServiceAccount *string `pulumi:"googleServiceAccount"` + LocalSsdCount *int `pulumi:"localSsdCount"` ZoneId *string `pulumi:"zoneId"` } @@ -32551,6 +33072,7 @@ type PipelineClusterGcpAttributesInput interface { type PipelineClusterGcpAttributesArgs struct { Availability pulumi.StringPtrInput `pulumi:"availability"` GoogleServiceAccount pulumi.StringPtrInput `pulumi:"googleServiceAccount"` + LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"` ZoneId pulumi.StringPtrInput `pulumi:"zoneId"` } @@ -32639,6 +33161,10 @@ func (o PipelineClusterGcpAttributesOutput) GoogleServiceAccount() pulumi.String return o.ApplyT(func(v PipelineClusterGcpAttributes) *string { return v.GoogleServiceAccount }).(pulumi.StringPtrOutput) } +func (o PipelineClusterGcpAttributesOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v PipelineClusterGcpAttributes) *int { return v.LocalSsdCount }).(pulumi.IntPtrOutput) +} + func (o PipelineClusterGcpAttributesOutput) ZoneId() pulumi.StringPtrOutput { return o.ApplyT(func(v PipelineClusterGcpAttributes) *string { return v.ZoneId }).(pulumi.StringPtrOutput) } @@ -32685,6 +33211,15 @@ func (o PipelineClusterGcpAttributesPtrOutput) GoogleServiceAccount() pulumi.Str }).(pulumi.StringPtrOutput) } +func (o PipelineClusterGcpAttributesPtrOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v *PipelineClusterGcpAttributes) *int { + if v == nil { + return nil + } + return v.LocalSsdCount + }).(pulumi.IntPtrOutput) +} + func (o PipelineClusterGcpAttributesPtrOutput) ZoneId() pulumi.StringPtrOutput { return o.ApplyT(func(v *PipelineClusterGcpAttributes) *string { if v == nil { @@ -43756,6 +44291,7 @@ type GetClusterClusterInfoGcpAttributes struct { Availability *string `pulumi:"availability"` BootDiskSize *int `pulumi:"bootDiskSize"` GoogleServiceAccount *string `pulumi:"googleServiceAccount"` + LocalSsdCount *int `pulumi:"localSsdCount"` UsePreemptibleExecutors *bool `pulumi:"usePreemptibleExecutors"` ZoneId *string `pulumi:"zoneId"` } @@ -43775,6 +44311,7 @@ type GetClusterClusterInfoGcpAttributesArgs struct { Availability pulumi.StringPtrInput `pulumi:"availability"` BootDiskSize pulumi.IntPtrInput `pulumi:"bootDiskSize"` GoogleServiceAccount pulumi.StringPtrInput `pulumi:"googleServiceAccount"` + LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"` UsePreemptibleExecutors pulumi.BoolPtrInput `pulumi:"usePreemptibleExecutors"` ZoneId pulumi.StringPtrInput `pulumi:"zoneId"` } @@ -43868,6 +44405,10 @@ func (o GetClusterClusterInfoGcpAttributesOutput) GoogleServiceAccount() pulumi. 
return o.ApplyT(func(v GetClusterClusterInfoGcpAttributes) *string { return v.GoogleServiceAccount }).(pulumi.StringPtrOutput) } +func (o GetClusterClusterInfoGcpAttributesOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v GetClusterClusterInfoGcpAttributes) *int { return v.LocalSsdCount }).(pulumi.IntPtrOutput) +} + func (o GetClusterClusterInfoGcpAttributesOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v GetClusterClusterInfoGcpAttributes) *bool { return v.UsePreemptibleExecutors }).(pulumi.BoolPtrOutput) } @@ -43927,6 +44468,15 @@ func (o GetClusterClusterInfoGcpAttributesPtrOutput) GoogleServiceAccount() pulu }).(pulumi.StringPtrOutput) } +func (o GetClusterClusterInfoGcpAttributesPtrOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v *GetClusterClusterInfoGcpAttributes) *int { + if v == nil { + return nil + } + return v.LocalSsdCount + }).(pulumi.IntPtrOutput) +} + func (o GetClusterClusterInfoGcpAttributesPtrOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v *GetClusterClusterInfoGcpAttributes) *bool { if v == nil { @@ -46239,6 +46789,7 @@ func (o GetInstancePoolPoolInfoDiskSpecDiskTypePtrOutput) EbsVolumeType() pulumi type GetInstancePoolPoolInfoGcpAttributes struct { GcpAvailability *string `pulumi:"gcpAvailability"` + LocalSsdCount *int `pulumi:"localSsdCount"` } // GetInstancePoolPoolInfoGcpAttributesInput is an input type that accepts GetInstancePoolPoolInfoGcpAttributesArgs and GetInstancePoolPoolInfoGcpAttributesOutput values. @@ -46254,6 +46805,7 @@ type GetInstancePoolPoolInfoGcpAttributesInput interface { type GetInstancePoolPoolInfoGcpAttributesArgs struct { GcpAvailability pulumi.StringPtrInput `pulumi:"gcpAvailability"` + LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"` } func (GetInstancePoolPoolInfoGcpAttributesArgs) ElementType() reflect.Type { @@ -46337,6 +46889,10 @@ func (o GetInstancePoolPoolInfoGcpAttributesOutput) GcpAvailability() pulumi.Str return o.ApplyT(func(v GetInstancePoolPoolInfoGcpAttributes) *string { return v.GcpAvailability }).(pulumi.StringPtrOutput) } +func (o GetInstancePoolPoolInfoGcpAttributesOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v GetInstancePoolPoolInfoGcpAttributes) *int { return v.LocalSsdCount }).(pulumi.IntPtrOutput) +} + type GetInstancePoolPoolInfoGcpAttributesPtrOutput struct{ *pulumi.OutputState } func (GetInstancePoolPoolInfoGcpAttributesPtrOutput) ElementType() reflect.Type { @@ -46370,6 +46926,15 @@ func (o GetInstancePoolPoolInfoGcpAttributesPtrOutput) GcpAvailability() pulumi. 
}).(pulumi.StringPtrOutput) } +func (o GetInstancePoolPoolInfoGcpAttributesPtrOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v *GetInstancePoolPoolInfoGcpAttributes) *int { + if v == nil { + return nil + } + return v.LocalSsdCount + }).(pulumi.IntPtrOutput) +} + type GetInstancePoolPoolInfoInstancePoolFleetAttribute struct { FleetOnDemandOption *GetInstancePoolPoolInfoInstancePoolFleetAttributeFleetOnDemandOption `pulumi:"fleetOnDemandOption"` FleetSpotOption *GetInstancePoolPoolInfoInstancePoolFleetAttributeFleetSpotOption `pulumi:"fleetSpotOption"` @@ -47530,11 +48095,13 @@ type GetJobJobSettingsSettings struct { NewCluster *GetJobJobSettingsSettingsNewCluster `pulumi:"newCluster"` NotebookTask *GetJobJobSettingsSettingsNotebookTask `pulumi:"notebookTask"` NotificationSettings *GetJobJobSettingsSettingsNotificationSettings `pulumi:"notificationSettings"` + Parameters []GetJobJobSettingsSettingsParameter `pulumi:"parameters"` PipelineTask *GetJobJobSettingsSettingsPipelineTask `pulumi:"pipelineTask"` PythonWheelTask *GetJobJobSettingsSettingsPythonWheelTask `pulumi:"pythonWheelTask"` Queue *GetJobJobSettingsSettingsQueue `pulumi:"queue"` RetryOnTimeout *bool `pulumi:"retryOnTimeout"` RunAs *GetJobJobSettingsSettingsRunAs `pulumi:"runAs"` + RunJobTask *GetJobJobSettingsSettingsRunJobTask `pulumi:"runJobTask"` Schedule *GetJobJobSettingsSettingsSchedule `pulumi:"schedule"` SparkJarTask *GetJobJobSettingsSettingsSparkJarTask `pulumi:"sparkJarTask"` SparkPythonTask *GetJobJobSettingsSettingsSparkPythonTask `pulumi:"sparkPythonTask"` @@ -47576,11 +48143,13 @@ type GetJobJobSettingsSettingsArgs struct { NewCluster GetJobJobSettingsSettingsNewClusterPtrInput `pulumi:"newCluster"` NotebookTask GetJobJobSettingsSettingsNotebookTaskPtrInput `pulumi:"notebookTask"` NotificationSettings GetJobJobSettingsSettingsNotificationSettingsPtrInput `pulumi:"notificationSettings"` + Parameters GetJobJobSettingsSettingsParameterArrayInput `pulumi:"parameters"` PipelineTask GetJobJobSettingsSettingsPipelineTaskPtrInput `pulumi:"pipelineTask"` PythonWheelTask GetJobJobSettingsSettingsPythonWheelTaskPtrInput `pulumi:"pythonWheelTask"` Queue GetJobJobSettingsSettingsQueuePtrInput `pulumi:"queue"` RetryOnTimeout pulumi.BoolPtrInput `pulumi:"retryOnTimeout"` RunAs GetJobJobSettingsSettingsRunAsPtrInput `pulumi:"runAs"` + RunJobTask GetJobJobSettingsSettingsRunJobTaskPtrInput `pulumi:"runJobTask"` Schedule GetJobJobSettingsSettingsSchedulePtrInput `pulumi:"schedule"` SparkJarTask GetJobJobSettingsSettingsSparkJarTaskPtrInput `pulumi:"sparkJarTask"` SparkPythonTask GetJobJobSettingsSettingsSparkPythonTaskPtrInput `pulumi:"sparkPythonTask"` @@ -47742,6 +48311,10 @@ func (o GetJobJobSettingsSettingsOutput) NotificationSettings() GetJobJobSetting }).(GetJobJobSettingsSettingsNotificationSettingsPtrOutput) } +func (o GetJobJobSettingsSettingsOutput) Parameters() GetJobJobSettingsSettingsParameterArrayOutput { + return o.ApplyT(func(v GetJobJobSettingsSettings) []GetJobJobSettingsSettingsParameter { return v.Parameters }).(GetJobJobSettingsSettingsParameterArrayOutput) +} + func (o GetJobJobSettingsSettingsOutput) PipelineTask() GetJobJobSettingsSettingsPipelineTaskPtrOutput { return o.ApplyT(func(v GetJobJobSettingsSettings) *GetJobJobSettingsSettingsPipelineTask { return v.PipelineTask }).(GetJobJobSettingsSettingsPipelineTaskPtrOutput) } @@ -47762,6 +48335,10 @@ func (o GetJobJobSettingsSettingsOutput) RunAs() GetJobJobSettingsSettingsRunAsP return o.ApplyT(func(v GetJobJobSettingsSettings) 
*GetJobJobSettingsSettingsRunAs { return v.RunAs }).(GetJobJobSettingsSettingsRunAsPtrOutput) } +func (o GetJobJobSettingsSettingsOutput) RunJobTask() GetJobJobSettingsSettingsRunJobTaskPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettings) *GetJobJobSettingsSettingsRunJobTask { return v.RunJobTask }).(GetJobJobSettingsSettingsRunJobTaskPtrOutput) +} + func (o GetJobJobSettingsSettingsOutput) Schedule() GetJobJobSettingsSettingsSchedulePtrOutput { return o.ApplyT(func(v GetJobJobSettingsSettings) *GetJobJobSettingsSettingsSchedule { return v.Schedule }).(GetJobJobSettingsSettingsSchedulePtrOutput) } @@ -47978,6 +48555,15 @@ func (o GetJobJobSettingsSettingsPtrOutput) NotificationSettings() GetJobJobSett }).(GetJobJobSettingsSettingsNotificationSettingsPtrOutput) } +func (o GetJobJobSettingsSettingsPtrOutput) Parameters() GetJobJobSettingsSettingsParameterArrayOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettings) []GetJobJobSettingsSettingsParameter { + if v == nil { + return nil + } + return v.Parameters + }).(GetJobJobSettingsSettingsParameterArrayOutput) +} + func (o GetJobJobSettingsSettingsPtrOutput) PipelineTask() GetJobJobSettingsSettingsPipelineTaskPtrOutput { return o.ApplyT(func(v *GetJobJobSettingsSettings) *GetJobJobSettingsSettingsPipelineTask { if v == nil { @@ -48023,6 +48609,15 @@ func (o GetJobJobSettingsSettingsPtrOutput) RunAs() GetJobJobSettingsSettingsRun }).(GetJobJobSettingsSettingsRunAsPtrOutput) } +func (o GetJobJobSettingsSettingsPtrOutput) RunJobTask() GetJobJobSettingsSettingsRunJobTaskPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettings) *GetJobJobSettingsSettingsRunJobTask { + if v == nil { + return nil + } + return v.RunJobTask + }).(GetJobJobSettingsSettingsRunJobTaskPtrOutput) +} + func (o GetJobJobSettingsSettingsPtrOutput) Schedule() GetJobJobSettingsSettingsSchedulePtrOutput { return o.ApplyT(func(v *GetJobJobSettingsSettings) *GetJobJobSettingsSettingsSchedule { if v == nil { @@ -51722,6 +52317,7 @@ type GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes struct { Availability *string `pulumi:"availability"` BootDiskSize *int `pulumi:"bootDiskSize"` GoogleServiceAccount *string `pulumi:"googleServiceAccount"` + LocalSsdCount *int `pulumi:"localSsdCount"` UsePreemptibleExecutors *bool `pulumi:"usePreemptibleExecutors"` ZoneId *string `pulumi:"zoneId"` } @@ -51741,6 +52337,7 @@ type GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesArgs struct { Availability pulumi.StringPtrInput `pulumi:"availability"` BootDiskSize pulumi.IntPtrInput `pulumi:"bootDiskSize"` GoogleServiceAccount pulumi.StringPtrInput `pulumi:"googleServiceAccount"` + LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"` UsePreemptibleExecutors pulumi.BoolPtrInput `pulumi:"usePreemptibleExecutors"` ZoneId pulumi.StringPtrInput `pulumi:"zoneId"` } @@ -51836,6 +52433,10 @@ func (o GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesOutput) Google }).(pulumi.StringPtrOutput) } +func (o GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes) *int { return v.LocalSsdCount }).(pulumi.IntPtrOutput) +} + func (o GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes) *bool { return v.UsePreemptibleExecutors @@ -51897,6 +52498,15 @@ func (o 
GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesPtrOutput) Goo }).(pulumi.StringPtrOutput) } +func (o GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesPtrOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes) *int { + if v == nil { + return nil + } + return v.LocalSsdCount + }).(pulumi.IntPtrOutput) +} + func (o GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesPtrOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v *GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes) *bool { if v == nil { @@ -55917,6 +56527,7 @@ type GetJobJobSettingsSettingsNewClusterGcpAttributes struct { Availability *string `pulumi:"availability"` BootDiskSize *int `pulumi:"bootDiskSize"` GoogleServiceAccount *string `pulumi:"googleServiceAccount"` + LocalSsdCount *int `pulumi:"localSsdCount"` UsePreemptibleExecutors *bool `pulumi:"usePreemptibleExecutors"` ZoneId *string `pulumi:"zoneId"` } @@ -55936,6 +56547,7 @@ type GetJobJobSettingsSettingsNewClusterGcpAttributesArgs struct { Availability pulumi.StringPtrInput `pulumi:"availability"` BootDiskSize pulumi.IntPtrInput `pulumi:"bootDiskSize"` GoogleServiceAccount pulumi.StringPtrInput `pulumi:"googleServiceAccount"` + LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"` UsePreemptibleExecutors pulumi.BoolPtrInput `pulumi:"usePreemptibleExecutors"` ZoneId pulumi.StringPtrInput `pulumi:"zoneId"` } @@ -56029,6 +56641,10 @@ func (o GetJobJobSettingsSettingsNewClusterGcpAttributesOutput) GoogleServiceAcc return o.ApplyT(func(v GetJobJobSettingsSettingsNewClusterGcpAttributes) *string { return v.GoogleServiceAccount }).(pulumi.StringPtrOutput) } +func (o GetJobJobSettingsSettingsNewClusterGcpAttributesOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsNewClusterGcpAttributes) *int { return v.LocalSsdCount }).(pulumi.IntPtrOutput) +} + func (o GetJobJobSettingsSettingsNewClusterGcpAttributesOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v GetJobJobSettingsSettingsNewClusterGcpAttributes) *bool { return v.UsePreemptibleExecutors }).(pulumi.BoolPtrOutput) } @@ -56088,6 +56704,15 @@ func (o GetJobJobSettingsSettingsNewClusterGcpAttributesPtrOutput) GoogleService }).(pulumi.StringPtrOutput) } +func (o GetJobJobSettingsSettingsNewClusterGcpAttributesPtrOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsNewClusterGcpAttributes) *int { + if v == nil { + return nil + } + return v.LocalSsdCount + }).(pulumi.IntPtrOutput) +} + func (o GetJobJobSettingsSettingsNewClusterGcpAttributesPtrOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v *GetJobJobSettingsSettingsNewClusterGcpAttributes) *bool { if v == nil { @@ -57724,6 +58349,109 @@ func (o GetJobJobSettingsSettingsNotificationSettingsPtrOutput) NoAlertForSkippe }).(pulumi.BoolPtrOutput) } +type GetJobJobSettingsSettingsParameter struct { + Default *string `pulumi:"default"` + // the job name of Job if the resource was matched by id. + Name *string `pulumi:"name"` +} + +// GetJobJobSettingsSettingsParameterInput is an input type that accepts GetJobJobSettingsSettingsParameterArgs and GetJobJobSettingsSettingsParameterOutput values. 
+// You can construct a concrete instance of `GetJobJobSettingsSettingsParameterInput` via: +// +// GetJobJobSettingsSettingsParameterArgs{...} +type GetJobJobSettingsSettingsParameterInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsParameterOutput() GetJobJobSettingsSettingsParameterOutput + ToGetJobJobSettingsSettingsParameterOutputWithContext(context.Context) GetJobJobSettingsSettingsParameterOutput +} + +type GetJobJobSettingsSettingsParameterArgs struct { + Default pulumi.StringPtrInput `pulumi:"default"` + // the job name of Job if the resource was matched by id. + Name pulumi.StringPtrInput `pulumi:"name"` +} + +func (GetJobJobSettingsSettingsParameterArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsParameter)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsParameterArgs) ToGetJobJobSettingsSettingsParameterOutput() GetJobJobSettingsSettingsParameterOutput { + return i.ToGetJobJobSettingsSettingsParameterOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsParameterArgs) ToGetJobJobSettingsSettingsParameterOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsParameterOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsParameterOutput) +} + +// GetJobJobSettingsSettingsParameterArrayInput is an input type that accepts GetJobJobSettingsSettingsParameterArray and GetJobJobSettingsSettingsParameterArrayOutput values. +// You can construct a concrete instance of `GetJobJobSettingsSettingsParameterArrayInput` via: +// +// GetJobJobSettingsSettingsParameterArray{ GetJobJobSettingsSettingsParameterArgs{...} } +type GetJobJobSettingsSettingsParameterArrayInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsParameterArrayOutput() GetJobJobSettingsSettingsParameterArrayOutput + ToGetJobJobSettingsSettingsParameterArrayOutputWithContext(context.Context) GetJobJobSettingsSettingsParameterArrayOutput +} + +type GetJobJobSettingsSettingsParameterArray []GetJobJobSettingsSettingsParameterInput + +func (GetJobJobSettingsSettingsParameterArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetJobJobSettingsSettingsParameter)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsParameterArray) ToGetJobJobSettingsSettingsParameterArrayOutput() GetJobJobSettingsSettingsParameterArrayOutput { + return i.ToGetJobJobSettingsSettingsParameterArrayOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsParameterArray) ToGetJobJobSettingsSettingsParameterArrayOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsParameterArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsParameterArrayOutput) +} + +type GetJobJobSettingsSettingsParameterOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsParameterOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsParameter)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsParameterOutput) ToGetJobJobSettingsSettingsParameterOutput() GetJobJobSettingsSettingsParameterOutput { + return o +} + +func (o GetJobJobSettingsSettingsParameterOutput) ToGetJobJobSettingsSettingsParameterOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsParameterOutput { + return o +} + +func (o GetJobJobSettingsSettingsParameterOutput) Default() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsParameter) *string { return v.Default }).(pulumi.StringPtrOutput) +} + +// the job name of Job if the resource 
was matched by id. +func (o GetJobJobSettingsSettingsParameterOutput) Name() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsParameter) *string { return v.Name }).(pulumi.StringPtrOutput) +} + +type GetJobJobSettingsSettingsParameterArrayOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsParameterArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetJobJobSettingsSettingsParameter)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsParameterArrayOutput) ToGetJobJobSettingsSettingsParameterArrayOutput() GetJobJobSettingsSettingsParameterArrayOutput { + return o +} + +func (o GetJobJobSettingsSettingsParameterArrayOutput) ToGetJobJobSettingsSettingsParameterArrayOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsParameterArrayOutput { + return o +} + +func (o GetJobJobSettingsSettingsParameterArrayOutput) Index(i pulumi.IntInput) GetJobJobSettingsSettingsParameterOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetJobJobSettingsSettingsParameter { + return vs[0].([]GetJobJobSettingsSettingsParameter)[vs[1].(int)] + }).(GetJobJobSettingsSettingsParameterOutput) +} + type GetJobJobSettingsSettingsPipelineTask struct { FullRefresh *bool `pulumi:"fullRefresh"` PipelineId string `pulumi:"pipelineId"` @@ -58316,6 +59044,154 @@ func (o GetJobJobSettingsSettingsRunAsPtrOutput) UserName() pulumi.StringPtrOutp }).(pulumi.StringPtrOutput) } +type GetJobJobSettingsSettingsRunJobTask struct { + JobId string `pulumi:"jobId"` + JobParameters map[string]interface{} `pulumi:"jobParameters"` +} + +// GetJobJobSettingsSettingsRunJobTaskInput is an input type that accepts GetJobJobSettingsSettingsRunJobTaskArgs and GetJobJobSettingsSettingsRunJobTaskOutput values. +// You can construct a concrete instance of `GetJobJobSettingsSettingsRunJobTaskInput` via: +// +// GetJobJobSettingsSettingsRunJobTaskArgs{...} +type GetJobJobSettingsSettingsRunJobTaskInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsRunJobTaskOutput() GetJobJobSettingsSettingsRunJobTaskOutput + ToGetJobJobSettingsSettingsRunJobTaskOutputWithContext(context.Context) GetJobJobSettingsSettingsRunJobTaskOutput +} + +type GetJobJobSettingsSettingsRunJobTaskArgs struct { + JobId pulumi.StringInput `pulumi:"jobId"` + JobParameters pulumi.MapInput `pulumi:"jobParameters"` +} + +func (GetJobJobSettingsSettingsRunJobTaskArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsRunJobTask)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsRunJobTaskArgs) ToGetJobJobSettingsSettingsRunJobTaskOutput() GetJobJobSettingsSettingsRunJobTaskOutput { + return i.ToGetJobJobSettingsSettingsRunJobTaskOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsRunJobTaskArgs) ToGetJobJobSettingsSettingsRunJobTaskOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsRunJobTaskOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsRunJobTaskOutput) +} + +func (i GetJobJobSettingsSettingsRunJobTaskArgs) ToGetJobJobSettingsSettingsRunJobTaskPtrOutput() GetJobJobSettingsSettingsRunJobTaskPtrOutput { + return i.ToGetJobJobSettingsSettingsRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsRunJobTaskArgs) ToGetJobJobSettingsSettingsRunJobTaskPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsRunJobTaskPtrOutput { + return pulumi.ToOutputWithContext(ctx, 
i).(GetJobJobSettingsSettingsRunJobTaskOutput).ToGetJobJobSettingsSettingsRunJobTaskPtrOutputWithContext(ctx) +} + +// GetJobJobSettingsSettingsRunJobTaskPtrInput is an input type that accepts GetJobJobSettingsSettingsRunJobTaskArgs, GetJobJobSettingsSettingsRunJobTaskPtr and GetJobJobSettingsSettingsRunJobTaskPtrOutput values. +// You can construct a concrete instance of `GetJobJobSettingsSettingsRunJobTaskPtrInput` via: +// +// GetJobJobSettingsSettingsRunJobTaskArgs{...} +// +// or: +// +// nil +type GetJobJobSettingsSettingsRunJobTaskPtrInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsRunJobTaskPtrOutput() GetJobJobSettingsSettingsRunJobTaskPtrOutput + ToGetJobJobSettingsSettingsRunJobTaskPtrOutputWithContext(context.Context) GetJobJobSettingsSettingsRunJobTaskPtrOutput +} + +type getJobJobSettingsSettingsRunJobTaskPtrType GetJobJobSettingsSettingsRunJobTaskArgs + +func GetJobJobSettingsSettingsRunJobTaskPtr(v *GetJobJobSettingsSettingsRunJobTaskArgs) GetJobJobSettingsSettingsRunJobTaskPtrInput { + return (*getJobJobSettingsSettingsRunJobTaskPtrType)(v) +} + +func (*getJobJobSettingsSettingsRunJobTaskPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsRunJobTask)(nil)).Elem() +} + +func (i *getJobJobSettingsSettingsRunJobTaskPtrType) ToGetJobJobSettingsSettingsRunJobTaskPtrOutput() GetJobJobSettingsSettingsRunJobTaskPtrOutput { + return i.ToGetJobJobSettingsSettingsRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (i *getJobJobSettingsSettingsRunJobTaskPtrType) ToGetJobJobSettingsSettingsRunJobTaskPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsRunJobTaskPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsRunJobTaskPtrOutput) +} + +type GetJobJobSettingsSettingsRunJobTaskOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsRunJobTaskOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsRunJobTask)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsRunJobTaskOutput) ToGetJobJobSettingsSettingsRunJobTaskOutput() GetJobJobSettingsSettingsRunJobTaskOutput { + return o +} + +func (o GetJobJobSettingsSettingsRunJobTaskOutput) ToGetJobJobSettingsSettingsRunJobTaskOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsRunJobTaskOutput { + return o +} + +func (o GetJobJobSettingsSettingsRunJobTaskOutput) ToGetJobJobSettingsSettingsRunJobTaskPtrOutput() GetJobJobSettingsSettingsRunJobTaskPtrOutput { + return o.ToGetJobJobSettingsSettingsRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (o GetJobJobSettingsSettingsRunJobTaskOutput) ToGetJobJobSettingsSettingsRunJobTaskPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsRunJobTaskPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v GetJobJobSettingsSettingsRunJobTask) *GetJobJobSettingsSettingsRunJobTask { + return &v + }).(GetJobJobSettingsSettingsRunJobTaskPtrOutput) +} + +func (o GetJobJobSettingsSettingsRunJobTaskOutput) JobId() pulumi.StringOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsRunJobTask) string { return v.JobId }).(pulumi.StringOutput) +} + +func (o GetJobJobSettingsSettingsRunJobTaskOutput) JobParameters() pulumi.MapOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsRunJobTask) map[string]interface{} { return v.JobParameters }).(pulumi.MapOutput) +} + +type GetJobJobSettingsSettingsRunJobTaskPtrOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsRunJobTaskPtrOutput) 
ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsRunJobTask)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsRunJobTaskPtrOutput) ToGetJobJobSettingsSettingsRunJobTaskPtrOutput() GetJobJobSettingsSettingsRunJobTaskPtrOutput { + return o +} + +func (o GetJobJobSettingsSettingsRunJobTaskPtrOutput) ToGetJobJobSettingsSettingsRunJobTaskPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsRunJobTaskPtrOutput { + return o +} + +func (o GetJobJobSettingsSettingsRunJobTaskPtrOutput) Elem() GetJobJobSettingsSettingsRunJobTaskOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsRunJobTask) GetJobJobSettingsSettingsRunJobTask { + if v != nil { + return *v + } + var ret GetJobJobSettingsSettingsRunJobTask + return ret + }).(GetJobJobSettingsSettingsRunJobTaskOutput) +} + +func (o GetJobJobSettingsSettingsRunJobTaskPtrOutput) JobId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsRunJobTask) *string { + if v == nil { + return nil + } + return &v.JobId + }).(pulumi.StringPtrOutput) +} + +func (o GetJobJobSettingsSettingsRunJobTaskPtrOutput) JobParameters() pulumi.MapOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsRunJobTask) map[string]interface{} { + if v == nil { + return nil + } + return v.JobParameters + }).(pulumi.MapOutput) +} + type GetJobJobSettingsSettingsSchedule struct { PauseStatus string `pulumi:"pauseStatus"` QuartzCronExpression string `pulumi:"quartzCronExpression"` @@ -58958,6 +59834,7 @@ type GetJobJobSettingsSettingsTask struct { PythonWheelTask *GetJobJobSettingsSettingsTaskPythonWheelTask `pulumi:"pythonWheelTask"` RetryOnTimeout bool `pulumi:"retryOnTimeout"` RunIf *string `pulumi:"runIf"` + RunJobTask *GetJobJobSettingsSettingsTaskRunJobTask `pulumi:"runJobTask"` SparkJarTask *GetJobJobSettingsSettingsTaskSparkJarTask `pulumi:"sparkJarTask"` SparkPythonTask *GetJobJobSettingsSettingsTaskSparkPythonTask `pulumi:"sparkPythonTask"` SparkSubmitTask *GetJobJobSettingsSettingsTaskSparkSubmitTask `pulumi:"sparkSubmitTask"` @@ -58997,6 +59874,7 @@ type GetJobJobSettingsSettingsTaskArgs struct { PythonWheelTask GetJobJobSettingsSettingsTaskPythonWheelTaskPtrInput `pulumi:"pythonWheelTask"` RetryOnTimeout pulumi.BoolInput `pulumi:"retryOnTimeout"` RunIf pulumi.StringPtrInput `pulumi:"runIf"` + RunJobTask GetJobJobSettingsSettingsTaskRunJobTaskPtrInput `pulumi:"runJobTask"` SparkJarTask GetJobJobSettingsSettingsTaskSparkJarTaskPtrInput `pulumi:"sparkJarTask"` SparkPythonTask GetJobJobSettingsSettingsTaskSparkPythonTaskPtrInput `pulumi:"sparkPythonTask"` SparkSubmitTask GetJobJobSettingsSettingsTaskSparkSubmitTaskPtrInput `pulumi:"sparkSubmitTask"` @@ -59144,6 +60022,10 @@ func (o GetJobJobSettingsSettingsTaskOutput) RunIf() pulumi.StringPtrOutput { return o.ApplyT(func(v GetJobJobSettingsSettingsTask) *string { return v.RunIf }).(pulumi.StringPtrOutput) } +func (o GetJobJobSettingsSettingsTaskOutput) RunJobTask() GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsTask) *GetJobJobSettingsSettingsTaskRunJobTask { return v.RunJobTask }).(GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) +} + func (o GetJobJobSettingsSettingsTaskOutput) SparkJarTask() GetJobJobSettingsSettingsTaskSparkJarTaskPtrOutput { return o.ApplyT(func(v GetJobJobSettingsSettingsTask) *GetJobJobSettingsSettingsTaskSparkJarTask { return v.SparkJarTask @@ -62811,6 +63693,7 @@ type GetJobJobSettingsSettingsTaskNewClusterGcpAttributes struct { Availability *string 
`pulumi:"availability"` BootDiskSize *int `pulumi:"bootDiskSize"` GoogleServiceAccount *string `pulumi:"googleServiceAccount"` + LocalSsdCount *int `pulumi:"localSsdCount"` UsePreemptibleExecutors *bool `pulumi:"usePreemptibleExecutors"` ZoneId *string `pulumi:"zoneId"` } @@ -62830,6 +63713,7 @@ type GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs struct { Availability pulumi.StringPtrInput `pulumi:"availability"` BootDiskSize pulumi.IntPtrInput `pulumi:"bootDiskSize"` GoogleServiceAccount pulumi.StringPtrInput `pulumi:"googleServiceAccount"` + LocalSsdCount pulumi.IntPtrInput `pulumi:"localSsdCount"` UsePreemptibleExecutors pulumi.BoolPtrInput `pulumi:"usePreemptibleExecutors"` ZoneId pulumi.StringPtrInput `pulumi:"zoneId"` } @@ -62923,6 +63807,10 @@ func (o GetJobJobSettingsSettingsTaskNewClusterGcpAttributesOutput) GoogleServic return o.ApplyT(func(v GetJobJobSettingsSettingsTaskNewClusterGcpAttributes) *string { return v.GoogleServiceAccount }).(pulumi.StringPtrOutput) } +func (o GetJobJobSettingsSettingsTaskNewClusterGcpAttributesOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsTaskNewClusterGcpAttributes) *int { return v.LocalSsdCount }).(pulumi.IntPtrOutput) +} + func (o GetJobJobSettingsSettingsTaskNewClusterGcpAttributesOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v GetJobJobSettingsSettingsTaskNewClusterGcpAttributes) *bool { return v.UsePreemptibleExecutors }).(pulumi.BoolPtrOutput) } @@ -62982,6 +63870,15 @@ func (o GetJobJobSettingsSettingsTaskNewClusterGcpAttributesPtrOutput) GoogleSer }).(pulumi.StringPtrOutput) } +func (o GetJobJobSettingsSettingsTaskNewClusterGcpAttributesPtrOutput) LocalSsdCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskNewClusterGcpAttributes) *int { + if v == nil { + return nil + } + return v.LocalSsdCount + }).(pulumi.IntPtrOutput) +} + func (o GetJobJobSettingsSettingsTaskNewClusterGcpAttributesPtrOutput) UsePreemptibleExecutors() pulumi.BoolPtrOutput { return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskNewClusterGcpAttributes) *bool { if v == nil { @@ -64959,6 +65856,154 @@ func (o GetJobJobSettingsSettingsTaskPythonWheelTaskPtrOutput) Parameters() pulu }).(pulumi.StringArrayOutput) } +type GetJobJobSettingsSettingsTaskRunJobTask struct { + JobId string `pulumi:"jobId"` + JobParameters map[string]interface{} `pulumi:"jobParameters"` +} + +// GetJobJobSettingsSettingsTaskRunJobTaskInput is an input type that accepts GetJobJobSettingsSettingsTaskRunJobTaskArgs and GetJobJobSettingsSettingsTaskRunJobTaskOutput values. 
+// You can construct a concrete instance of `GetJobJobSettingsSettingsTaskRunJobTaskInput` via: +// +// GetJobJobSettingsSettingsTaskRunJobTaskArgs{...} +type GetJobJobSettingsSettingsTaskRunJobTaskInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsTaskRunJobTaskOutput() GetJobJobSettingsSettingsTaskRunJobTaskOutput + ToGetJobJobSettingsSettingsTaskRunJobTaskOutputWithContext(context.Context) GetJobJobSettingsSettingsTaskRunJobTaskOutput +} + +type GetJobJobSettingsSettingsTaskRunJobTaskArgs struct { + JobId pulumi.StringInput `pulumi:"jobId"` + JobParameters pulumi.MapInput `pulumi:"jobParameters"` +} + +func (GetJobJobSettingsSettingsTaskRunJobTaskArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsTaskRunJobTask)(nil)).Elem() +} + +func (i GetJobJobSettingsSettingsTaskRunJobTaskArgs) ToGetJobJobSettingsSettingsTaskRunJobTaskOutput() GetJobJobSettingsSettingsTaskRunJobTaskOutput { + return i.ToGetJobJobSettingsSettingsTaskRunJobTaskOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsTaskRunJobTaskArgs) ToGetJobJobSettingsSettingsTaskRunJobTaskOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskRunJobTaskOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsTaskRunJobTaskOutput) +} + +func (i GetJobJobSettingsSettingsTaskRunJobTaskArgs) ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutput() GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput { + return i.ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (i GetJobJobSettingsSettingsTaskRunJobTaskArgs) ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsTaskRunJobTaskOutput).ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutputWithContext(ctx) +} + +// GetJobJobSettingsSettingsTaskRunJobTaskPtrInput is an input type that accepts GetJobJobSettingsSettingsTaskRunJobTaskArgs, GetJobJobSettingsSettingsTaskRunJobTaskPtr and GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput values. 
+// You can construct a concrete instance of `GetJobJobSettingsSettingsTaskRunJobTaskPtrInput` via: +// +// GetJobJobSettingsSettingsTaskRunJobTaskArgs{...} +// +// or: +// +// nil +type GetJobJobSettingsSettingsTaskRunJobTaskPtrInput interface { + pulumi.Input + + ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutput() GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput + ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutputWithContext(context.Context) GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput +} + +type getJobJobSettingsSettingsTaskRunJobTaskPtrType GetJobJobSettingsSettingsTaskRunJobTaskArgs + +func GetJobJobSettingsSettingsTaskRunJobTaskPtr(v *GetJobJobSettingsSettingsTaskRunJobTaskArgs) GetJobJobSettingsSettingsTaskRunJobTaskPtrInput { + return (*getJobJobSettingsSettingsTaskRunJobTaskPtrType)(v) +} + +func (*getJobJobSettingsSettingsTaskRunJobTaskPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsTaskRunJobTask)(nil)).Elem() +} + +func (i *getJobJobSettingsSettingsTaskRunJobTaskPtrType) ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutput() GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput { + return i.ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (i *getJobJobSettingsSettingsTaskRunJobTaskPtrType) ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) +} + +type GetJobJobSettingsSettingsTaskRunJobTaskOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsTaskRunJobTaskOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetJobJobSettingsSettingsTaskRunJobTask)(nil)).Elem() +} + +func (o GetJobJobSettingsSettingsTaskRunJobTaskOutput) ToGetJobJobSettingsSettingsTaskRunJobTaskOutput() GetJobJobSettingsSettingsTaskRunJobTaskOutput { + return o +} + +func (o GetJobJobSettingsSettingsTaskRunJobTaskOutput) ToGetJobJobSettingsSettingsTaskRunJobTaskOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskRunJobTaskOutput { + return o +} + +func (o GetJobJobSettingsSettingsTaskRunJobTaskOutput) ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutput() GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput { + return o.ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutputWithContext(context.Background()) +} + +func (o GetJobJobSettingsSettingsTaskRunJobTaskOutput) ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v GetJobJobSettingsSettingsTaskRunJobTask) *GetJobJobSettingsSettingsTaskRunJobTask { + return &v + }).(GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) +} + +func (o GetJobJobSettingsSettingsTaskRunJobTaskOutput) JobId() pulumi.StringOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsTaskRunJobTask) string { return v.JobId }).(pulumi.StringOutput) +} + +func (o GetJobJobSettingsSettingsTaskRunJobTaskOutput) JobParameters() pulumi.MapOutput { + return o.ApplyT(func(v GetJobJobSettingsSettingsTaskRunJobTask) map[string]interface{} { return v.JobParameters }).(pulumi.MapOutput) +} + +type GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput struct{ *pulumi.OutputState } + +func (GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**GetJobJobSettingsSettingsTaskRunJobTask)(nil)).Elem() +} + +func (o 
GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutput() GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput { + return o +} + +func (o GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) ToGetJobJobSettingsSettingsTaskRunJobTaskPtrOutputWithContext(ctx context.Context) GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput { + return o +} + +func (o GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) Elem() GetJobJobSettingsSettingsTaskRunJobTaskOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskRunJobTask) GetJobJobSettingsSettingsTaskRunJobTask { + if v != nil { + return *v + } + var ret GetJobJobSettingsSettingsTaskRunJobTask + return ret + }).(GetJobJobSettingsSettingsTaskRunJobTaskOutput) +} + +func (o GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) JobId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskRunJobTask) *string { + if v == nil { + return nil + } + return &v.JobId + }).(pulumi.StringPtrOutput) +} + +func (o GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput) JobParameters() pulumi.MapOutput { + return o.ApplyT(func(v *GetJobJobSettingsSettingsTaskRunJobTask) map[string]interface{} { + if v == nil { + return nil + } + return v.JobParameters + }).(pulumi.MapOutput) +} + type GetJobJobSettingsSettingsTaskSparkJarTask struct { JarUri *string `pulumi:"jarUri"` MainClassName *string `pulumi:"mainClassName"` @@ -68974,6 +70019,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*JobNotebookTaskPtrInput)(nil)).Elem(), JobNotebookTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobNotificationSettingsInput)(nil)).Elem(), JobNotificationSettingsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobNotificationSettingsPtrInput)(nil)).Elem(), JobNotificationSettingsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobParameterInput)(nil)).Elem(), JobParameterArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobParameterArrayInput)(nil)).Elem(), JobParameterArray{}) pulumi.RegisterInputType(reflect.TypeOf((*JobPipelineTaskInput)(nil)).Elem(), JobPipelineTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobPipelineTaskPtrInput)(nil)).Elem(), JobPipelineTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobPythonWheelTaskInput)(nil)).Elem(), JobPythonWheelTaskArgs{}) @@ -68982,6 +70029,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*JobQueuePtrInput)(nil)).Elem(), JobQueueArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobRunAsInput)(nil)).Elem(), JobRunAsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobRunAsPtrInput)(nil)).Elem(), JobRunAsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobRunJobTaskInput)(nil)).Elem(), JobRunJobTaskArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobRunJobTaskPtrInput)(nil)).Elem(), JobRunJobTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobScheduleInput)(nil)).Elem(), JobScheduleArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobSchedulePtrInput)(nil)).Elem(), JobScheduleArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobSparkJarTaskInput)(nil)).Elem(), JobSparkJarTaskArgs{}) @@ -69061,6 +70110,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*JobTaskPipelineTaskPtrInput)(nil)).Elem(), JobTaskPipelineTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskPythonWheelTaskInput)(nil)).Elem(), JobTaskPythonWheelTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskPythonWheelTaskPtrInput)(nil)).Elem(), JobTaskPythonWheelTaskArgs{}) + 
pulumi.RegisterInputType(reflect.TypeOf((*JobTaskRunJobTaskInput)(nil)).Elem(), JobTaskRunJobTaskArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*JobTaskRunJobTaskPtrInput)(nil)).Elem(), JobTaskRunJobTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskSparkJarTaskInput)(nil)).Elem(), JobTaskSparkJarTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskSparkJarTaskPtrInput)(nil)).Elem(), JobTaskSparkJarTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*JobTaskSparkPythonTaskInput)(nil)).Elem(), JobTaskSparkPythonTaskArgs{}) @@ -69479,6 +70530,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsNotebookTaskPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsNotebookTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsNotificationSettingsInput)(nil)).Elem(), GetJobJobSettingsSettingsNotificationSettingsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsNotificationSettingsPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsNotificationSettingsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsParameterInput)(nil)).Elem(), GetJobJobSettingsSettingsParameterArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsParameterArrayInput)(nil)).Elem(), GetJobJobSettingsSettingsParameterArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsPipelineTaskInput)(nil)).Elem(), GetJobJobSettingsSettingsPipelineTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsPipelineTaskPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsPipelineTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsPythonWheelTaskInput)(nil)).Elem(), GetJobJobSettingsSettingsPythonWheelTaskArgs{}) @@ -69487,6 +70540,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsQueuePtrInput)(nil)).Elem(), GetJobJobSettingsSettingsQueueArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsRunAsInput)(nil)).Elem(), GetJobJobSettingsSettingsRunAsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsRunAsPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsRunAsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsRunJobTaskInput)(nil)).Elem(), GetJobJobSettingsSettingsRunJobTaskArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsRunJobTaskPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsRunJobTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsScheduleInput)(nil)).Elem(), GetJobJobSettingsSettingsScheduleArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsSchedulePtrInput)(nil)).Elem(), GetJobJobSettingsSettingsScheduleArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsSparkJarTaskInput)(nil)).Elem(), GetJobJobSettingsSettingsSparkJarTaskArgs{}) @@ -69566,6 +70621,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskPipelineTaskPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskPipelineTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskPythonWheelTaskInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskPythonWheelTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskPythonWheelTaskPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskPythonWheelTaskArgs{}) + 
pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskRunJobTaskInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskRunJobTaskArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskRunJobTaskPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskRunJobTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskSparkJarTaskInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskSparkJarTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskSparkJarTaskPtrInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskSparkJarTaskArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetJobJobSettingsSettingsTaskSparkPythonTaskInput)(nil)).Elem(), GetJobJobSettingsSettingsTaskSparkPythonTaskArgs{}) @@ -69805,6 +70862,8 @@ func init() { pulumi.RegisterOutputType(JobNotebookTaskPtrOutput{}) pulumi.RegisterOutputType(JobNotificationSettingsOutput{}) pulumi.RegisterOutputType(JobNotificationSettingsPtrOutput{}) + pulumi.RegisterOutputType(JobParameterOutput{}) + pulumi.RegisterOutputType(JobParameterArrayOutput{}) pulumi.RegisterOutputType(JobPipelineTaskOutput{}) pulumi.RegisterOutputType(JobPipelineTaskPtrOutput{}) pulumi.RegisterOutputType(JobPythonWheelTaskOutput{}) @@ -69813,6 +70872,8 @@ func init() { pulumi.RegisterOutputType(JobQueuePtrOutput{}) pulumi.RegisterOutputType(JobRunAsOutput{}) pulumi.RegisterOutputType(JobRunAsPtrOutput{}) + pulumi.RegisterOutputType(JobRunJobTaskOutput{}) + pulumi.RegisterOutputType(JobRunJobTaskPtrOutput{}) pulumi.RegisterOutputType(JobScheduleOutput{}) pulumi.RegisterOutputType(JobSchedulePtrOutput{}) pulumi.RegisterOutputType(JobSparkJarTaskOutput{}) @@ -69892,6 +70953,8 @@ func init() { pulumi.RegisterOutputType(JobTaskPipelineTaskPtrOutput{}) pulumi.RegisterOutputType(JobTaskPythonWheelTaskOutput{}) pulumi.RegisterOutputType(JobTaskPythonWheelTaskPtrOutput{}) + pulumi.RegisterOutputType(JobTaskRunJobTaskOutput{}) + pulumi.RegisterOutputType(JobTaskRunJobTaskPtrOutput{}) pulumi.RegisterOutputType(JobTaskSparkJarTaskOutput{}) pulumi.RegisterOutputType(JobTaskSparkJarTaskPtrOutput{}) pulumi.RegisterOutputType(JobTaskSparkPythonTaskOutput{}) @@ -70310,6 +71373,8 @@ func init() { pulumi.RegisterOutputType(GetJobJobSettingsSettingsNotebookTaskPtrOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsNotificationSettingsOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsNotificationSettingsPtrOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsParameterOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsParameterArrayOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsPipelineTaskOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsPipelineTaskPtrOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsPythonWheelTaskOutput{}) @@ -70318,6 +71383,8 @@ func init() { pulumi.RegisterOutputType(GetJobJobSettingsSettingsQueuePtrOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsRunAsOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsRunAsPtrOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsRunJobTaskOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsRunJobTaskPtrOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsScheduleOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsSchedulePtrOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsSparkJarTaskOutput{}) @@ -70397,6 +71464,8 @@ func init() { pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskPipelineTaskPtrOutput{}) 
pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskPythonWheelTaskOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskPythonWheelTaskPtrOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskRunJobTaskOutput{}) + pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskRunJobTaskPtrOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskSparkJarTaskOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskSparkJarTaskPtrOutput{}) pulumi.RegisterOutputType(GetJobJobSettingsSettingsTaskSparkPythonTaskOutput{}) diff --git a/sdk/java/src/main/java/com/pulumi/databricks/Job.java b/sdk/java/src/main/java/com/pulumi/databricks/Job.java index 97e7042e..e513da03 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/Job.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/Job.java @@ -21,10 +21,12 @@ import com.pulumi.databricks.outputs.JobNewCluster; import com.pulumi.databricks.outputs.JobNotebookTask; import com.pulumi.databricks.outputs.JobNotificationSettings; +import com.pulumi.databricks.outputs.JobParameter; import com.pulumi.databricks.outputs.JobPipelineTask; import com.pulumi.databricks.outputs.JobPythonWheelTask; import com.pulumi.databricks.outputs.JobQueue; import com.pulumi.databricks.outputs.JobRunAs; +import com.pulumi.databricks.outputs.JobRunJobTask; import com.pulumi.databricks.outputs.JobSchedule; import com.pulumi.databricks.outputs.JobSparkJarTask; import com.pulumi.databricks.outputs.JobSparkPythonTask; @@ -315,6 +317,12 @@ public Output> notebookTask() { public Output> notificationSettings() { return Codegen.optional(this.notificationSettings); } + @Export(name="parameters", type=List.class, parameters={JobParameter.class}) + private Output> parameters; + + public Output>> parameters() { + return Codegen.optional(this.parameters); + } @Export(name="pipelineTask", type=JobPipelineTask.class, parameters={}) private Output pipelineTask; @@ -353,6 +361,12 @@ public Output> retryOnTimeout() { public Output> runAs() { return Codegen.optional(this.runAs); } + @Export(name="runJobTask", type=JobRunJobTask.class, parameters={}) + private Output runJobTask; + + public Output> runJobTask() { + return Codegen.optional(this.runJobTask); + } /** * (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. 
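// --- Illustrative usage sketch (editorial aside, not generator output) ---------
// The Job resource above now exposes the new `parameters` and `runJobTask`
// blocks as outputs. A minimal read-back, assuming a `com.pulumi.Context ctx`
// inside Pulumi.run and the hypothetical resource name "orchestrator"
// (standing in for a fully configured Job defined elsewhere):
//   import com.pulumi.databricks.Job;
var orchestrator = new Job("orchestrator");
ctx.export("orchestratorRunJobTask", orchestrator.runJobTask());
ctx.export("orchestratorParameters", orchestrator.parameters());
// --------------------------------------------------------------------------------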
* diff --git a/sdk/java/src/main/java/com/pulumi/databricks/JobArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/JobArgs.java index 82a52adb..3f009449 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/JobArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/JobArgs.java @@ -16,10 +16,12 @@ import com.pulumi.databricks.inputs.JobNewClusterArgs; import com.pulumi.databricks.inputs.JobNotebookTaskArgs; import com.pulumi.databricks.inputs.JobNotificationSettingsArgs; +import com.pulumi.databricks.inputs.JobParameterArgs; import com.pulumi.databricks.inputs.JobPipelineTaskArgs; import com.pulumi.databricks.inputs.JobPythonWheelTaskArgs; import com.pulumi.databricks.inputs.JobQueueArgs; import com.pulumi.databricks.inputs.JobRunAsArgs; +import com.pulumi.databricks.inputs.JobRunJobTaskArgs; import com.pulumi.databricks.inputs.JobScheduleArgs; import com.pulumi.databricks.inputs.JobSparkJarTaskArgs; import com.pulumi.databricks.inputs.JobSparkPythonTaskArgs; @@ -327,6 +329,13 @@ public Optional> notificationSettings() { return Optional.ofNullable(this.notificationSettings); } + @Import(name="parameters") + private @Nullable Output> parameters; + + public Optional>> parameters() { + return Optional.ofNullable(this.parameters); + } + @Import(name="pipelineTask") private @Nullable Output pipelineTask; @@ -370,6 +379,13 @@ public Optional> runAs() { return Optional.ofNullable(this.runAs); } + @Import(name="runJobTask") + private @Nullable Output runJobTask; + + public Optional> runJobTask() { + return Optional.ofNullable(this.runJobTask); + } + /** * (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. * @@ -479,11 +495,13 @@ private JobArgs(JobArgs $) { this.newCluster = $.newCluster; this.notebookTask = $.notebookTask; this.notificationSettings = $.notificationSettings; + this.parameters = $.parameters; this.pipelineTask = $.pipelineTask; this.pythonWheelTask = $.pythonWheelTask; this.queue = $.queue; this.retryOnTimeout = $.retryOnTimeout; this.runAs = $.runAs; + this.runJobTask = $.runJobTask; this.schedule = $.schedule; this.sparkJarTask = $.sparkJarTask; this.sparkPythonTask = $.sparkPythonTask; @@ -908,6 +926,19 @@ public Builder notificationSettings(JobNotificationSettingsArgs notificationSett return notificationSettings(Output.of(notificationSettings)); } + public Builder parameters(@Nullable Output> parameters) { + $.parameters = parameters; + return this; + } + + public Builder parameters(List parameters) { + return parameters(Output.of(parameters)); + } + + public Builder parameters(JobParameterArgs... parameters) { + return parameters(List.of(parameters)); + } + public Builder pipelineTask(@Nullable Output pipelineTask) { $.pipelineTask = pipelineTask; return this; @@ -965,6 +996,15 @@ public Builder runAs(JobRunAsArgs runAs) { return runAs(Output.of(runAs)); } + public Builder runJobTask(@Nullable Output runJobTask) { + $.runJobTask = runJobTask; + return this; + } + + public Builder runJobTask(JobRunJobTaskArgs runJobTask) { + return runJobTask(Output.of(runJobTask)); + } + /** * @param schedule (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. 
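// --- Illustrative usage sketch (editorial aside, not generator output) ---------
// Wiring the new parameters(...) and runJobTask(...) builder methods added to
// JobArgs above. JobParameterArgs (name/default_) and JobRunJobTaskArgs (jobId)
// follow the bridge metadata (name/default, job_id); the literal values and the
// integer job id are assumptions here, so check the generated Args classes for
// the exact field types before copying this.
//   import com.pulumi.databricks.Job;
//   import com.pulumi.databricks.JobArgs;
//   import com.pulumi.databricks.inputs.JobParameterArgs;
//   import com.pulumi.databricks.inputs.JobRunJobTaskArgs;
var orchestrator = new Job("orchestrator", JobArgs.builder()
    .name("orchestrator")
    .parameters(JobParameterArgs.builder()
        .name("env")          // job-level parameter name
        .default_("dev")      // default used when no override is supplied at run time
        .build())
    .runJobTask(JobRunJobTaskArgs.builder()
        .jobId(1234)          // hypothetical id of the downstream job to trigger
        .build())
    .build());
// --------------------------------------------------------------------------------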
* diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ClusterGcpAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ClusterGcpAttributesArgs.java index 95cb7a5a..91f6bc99 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ClusterGcpAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ClusterGcpAttributesArgs.java @@ -38,9 +38,28 @@ public Optional> googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Output localSsdCount; + + public Optional> localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + + /** + * @deprecated + * Please use 'availability' instead. + * + */ + @Deprecated /* Please use 'availability' instead. */ @Import(name="usePreemptibleExecutors") private @Nullable Output usePreemptibleExecutors; + /** + * @deprecated + * Please use 'availability' instead. + * + */ + @Deprecated /* Please use 'availability' instead. */ public Optional> usePreemptibleExecutors() { return Optional.ofNullable(this.usePreemptibleExecutors); } @@ -58,6 +77,7 @@ private ClusterGcpAttributesArgs(ClusterGcpAttributesArgs $) { this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = $.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -107,11 +127,36 @@ public Builder googleServiceAccount(String googleServiceAccount) { return googleServiceAccount(Output.of(googleServiceAccount)); } + public Builder localSsdCount(@Nullable Output localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + + public Builder localSsdCount(Integer localSsdCount) { + return localSsdCount(Output.of(localSsdCount)); + } + + /** + * @return builder + * + * @deprecated + * Please use 'availability' instead. + * + */ + @Deprecated /* Please use 'availability' instead. */ public Builder usePreemptibleExecutors(@Nullable Output usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; } + /** + * @return builder + * + * @deprecated + * Please use 'availability' instead. + * + */ + @Deprecated /* Please use 'availability' instead. 
*/ public Builder usePreemptibleExecutors(Boolean usePreemptibleExecutors) { return usePreemptibleExecutors(Output.of(usePreemptibleExecutors)); } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetClusterClusterInfoGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetClusterClusterInfoGcpAttributes.java index 5b0ee4eb..64414ee1 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetClusterClusterInfoGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetClusterClusterInfoGcpAttributes.java @@ -37,6 +37,13 @@ public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Integer localSsdCount; + + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="usePreemptibleExecutors") private @Nullable Boolean usePreemptibleExecutors; @@ -57,6 +64,7 @@ private GetClusterClusterInfoGcpAttributes(GetClusterClusterInfoGcpAttributes $) this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = $.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -94,6 +102,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } + public Builder localSsdCount(@Nullable Integer localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetClusterClusterInfoGcpAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetClusterClusterInfoGcpAttributesArgs.java index 0b3781e5..b957fa5d 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetClusterClusterInfoGcpAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetClusterClusterInfoGcpAttributesArgs.java @@ -38,6 +38,13 @@ public Optional> googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Output localSsdCount; + + public Optional> localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="usePreemptibleExecutors") private @Nullable Output usePreemptibleExecutors; @@ -58,6 +65,7 @@ private GetClusterClusterInfoGcpAttributesArgs(GetClusterClusterInfoGcpAttribute this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = $.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -107,6 +115,15 @@ public Builder googleServiceAccount(String googleServiceAccount) { return googleServiceAccount(Output.of(googleServiceAccount)); } + public Builder localSsdCount(@Nullable Output localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + + public Builder localSsdCount(Integer localSsdCount) { + return localSsdCount(Output.of(localSsdCount)); + } + public Builder usePreemptibleExecutors(@Nullable Output usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetInstancePoolPoolInfoGcpAttributes.java 
b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetInstancePoolPoolInfoGcpAttributes.java index 02278d56..0dc80440 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetInstancePoolPoolInfoGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetInstancePoolPoolInfoGcpAttributes.java @@ -4,6 +4,7 @@ package com.pulumi.databricks.inputs; import com.pulumi.core.annotations.Import; +import java.lang.Integer; import java.lang.String; import java.util.Objects; import java.util.Optional; @@ -21,10 +22,18 @@ public Optional gcpAvailability() { return Optional.ofNullable(this.gcpAvailability); } + @Import(name="localSsdCount") + private @Nullable Integer localSsdCount; + + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + private GetInstancePoolPoolInfoGcpAttributes() {} private GetInstancePoolPoolInfoGcpAttributes(GetInstancePoolPoolInfoGcpAttributes $) { this.gcpAvailability = $.gcpAvailability; + this.localSsdCount = $.localSsdCount; } public static Builder builder() { @@ -50,6 +59,11 @@ public Builder gcpAvailability(@Nullable String gcpAvailability) { return this; } + public Builder localSsdCount(@Nullable Integer localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + public GetInstancePoolPoolInfoGcpAttributes build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetInstancePoolPoolInfoGcpAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetInstancePoolPoolInfoGcpAttributesArgs.java index 852ca08b..f4cbaa68 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetInstancePoolPoolInfoGcpAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetInstancePoolPoolInfoGcpAttributesArgs.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import java.lang.Integer; import java.lang.String; import java.util.Objects; import java.util.Optional; @@ -22,10 +23,18 @@ public Optional> gcpAvailability() { return Optional.ofNullable(this.gcpAvailability); } + @Import(name="localSsdCount") + private @Nullable Output localSsdCount; + + public Optional> localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + private GetInstancePoolPoolInfoGcpAttributesArgs() {} private GetInstancePoolPoolInfoGcpAttributesArgs(GetInstancePoolPoolInfoGcpAttributesArgs $) { this.gcpAvailability = $.gcpAvailability; + this.localSsdCount = $.localSsdCount; } public static Builder builder() { @@ -55,6 +64,15 @@ public Builder gcpAvailability(String gcpAvailability) { return gcpAvailability(Output.of(gcpAvailability)); } + public Builder localSsdCount(@Nullable Output localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + + public Builder localSsdCount(Integer localSsdCount) { + return localSsdCount(Output.of(localSsdCount)); + } + public GetInstancePoolPoolInfoGcpAttributesArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettings.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettings.java index 9112f618..f43e45e0 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettings.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettings.java @@ -15,10 +15,12 @@ import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsNewCluster; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsNotebookTask; import 
com.pulumi.databricks.inputs.GetJobJobSettingsSettingsNotificationSettings; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsParameter; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsPipelineTask; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsPythonWheelTask; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsQueue; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsRunAs; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsRunJobTask; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsSchedule; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsSparkJarTask; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsSparkPythonTask; @@ -168,6 +170,13 @@ public Optional notificationSetti return Optional.ofNullable(this.notificationSettings); } + @Import(name="parameters") + private @Nullable List parameters; + + public Optional> parameters() { + return Optional.ofNullable(this.parameters); + } + @Import(name="pipelineTask") private @Nullable GetJobJobSettingsSettingsPipelineTask pipelineTask; @@ -203,6 +212,13 @@ public Optional runAs() { return Optional.ofNullable(this.runAs); } + @Import(name="runJobTask") + private @Nullable GetJobJobSettingsSettingsRunJobTask runJobTask; + + public Optional runJobTask() { + return Optional.ofNullable(this.runJobTask); + } + @Import(name="schedule") private @Nullable GetJobJobSettingsSettingsSchedule schedule; @@ -286,11 +302,13 @@ private GetJobJobSettingsSettings(GetJobJobSettingsSettings $) { this.newCluster = $.newCluster; this.notebookTask = $.notebookTask; this.notificationSettings = $.notificationSettings; + this.parameters = $.parameters; this.pipelineTask = $.pipelineTask; this.pythonWheelTask = $.pythonWheelTask; this.queue = $.queue; this.retryOnTimeout = $.retryOnTimeout; this.runAs = $.runAs; + this.runJobTask = $.runJobTask; this.schedule = $.schedule; this.sparkJarTask = $.sparkJarTask; this.sparkPythonTask = $.sparkPythonTask; @@ -423,6 +441,15 @@ public Builder notificationSettings(@Nullable GetJobJobSettingsSettingsNotificat return this; } + public Builder parameters(@Nullable List parameters) { + $.parameters = parameters; + return this; + } + + public Builder parameters(GetJobJobSettingsSettingsParameter... 
parameters) { + return parameters(List.of(parameters)); + } + public Builder pipelineTask(@Nullable GetJobJobSettingsSettingsPipelineTask pipelineTask) { $.pipelineTask = pipelineTask; return this; @@ -448,6 +475,11 @@ public Builder runAs(@Nullable GetJobJobSettingsSettingsRunAs runAs) { return this; } + public Builder runJobTask(@Nullable GetJobJobSettingsSettingsRunJobTask runJobTask) { + $.runJobTask = runJobTask; + return this; + } + public Builder schedule(@Nullable GetJobJobSettingsSettingsSchedule schedule) { $.schedule = schedule; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsArgs.java index 9fe7826a..7540b58d 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsArgs.java @@ -16,10 +16,12 @@ import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsNewClusterArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsNotebookTaskArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsNotificationSettingsArgs; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsParameterArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsPipelineTaskArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsPythonWheelTaskArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsQueueArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsRunAsArgs; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsRunJobTaskArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsScheduleArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsSparkJarTaskArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsSparkPythonTaskArgs; @@ -169,6 +171,13 @@ public Optional> notif return Optional.ofNullable(this.notificationSettings); } + @Import(name="parameters") + private @Nullable Output> parameters; + + public Optional>> parameters() { + return Optional.ofNullable(this.parameters); + } + @Import(name="pipelineTask") private @Nullable Output pipelineTask; @@ -204,6 +213,13 @@ public Optional> runAs() { return Optional.ofNullable(this.runAs); } + @Import(name="runJobTask") + private @Nullable Output runJobTask; + + public Optional> runJobTask() { + return Optional.ofNullable(this.runJobTask); + } + @Import(name="schedule") private @Nullable Output schedule; @@ -287,11 +303,13 @@ private GetJobJobSettingsSettingsArgs(GetJobJobSettingsSettingsArgs $) { this.newCluster = $.newCluster; this.notebookTask = $.notebookTask; this.notificationSettings = $.notificationSettings; + this.parameters = $.parameters; this.pipelineTask = $.pipelineTask; this.pythonWheelTask = $.pythonWheelTask; this.queue = $.queue; this.retryOnTimeout = $.retryOnTimeout; this.runAs = $.runAs; + this.runJobTask = $.runJobTask; this.schedule = $.schedule; this.sparkJarTask = $.sparkJarTask; this.sparkPythonTask = $.sparkPythonTask; @@ -498,6 +516,19 @@ public Builder notificationSettings(GetJobJobSettingsSettingsNotificationSetting return notificationSettings(Output.of(notificationSettings)); } + public Builder parameters(@Nullable Output> parameters) { + $.parameters = parameters; + return this; + } + + public Builder parameters(List parameters) { + return parameters(Output.of(parameters)); + } + + public Builder parameters(GetJobJobSettingsSettingsParameterArgs... 
parameters) { + return parameters(List.of(parameters)); + } + public Builder pipelineTask(@Nullable Output pipelineTask) { $.pipelineTask = pipelineTask; return this; @@ -543,6 +574,15 @@ public Builder runAs(GetJobJobSettingsSettingsRunAsArgs runAs) { return runAs(Output.of(runAs)); } + public Builder runJobTask(@Nullable Output runJobTask) { + $.runJobTask = runJobTask; + return this; + } + + public Builder runJobTask(GetJobJobSettingsSettingsRunJobTaskArgs runJobTask) { + return runJobTask(Output.of(runJobTask)); + } + public Builder schedule(@Nullable Output schedule) { $.schedule = schedule; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.java index 1fef6697..1fb584f0 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.java @@ -37,6 +37,13 @@ public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Integer localSsdCount; + + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="usePreemptibleExecutors") private @Nullable Boolean usePreemptibleExecutors; @@ -57,6 +64,7 @@ private GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes(GetJobJobSett this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = $.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -94,6 +102,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } + public Builder localSsdCount(@Nullable Integer localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesArgs.java index d7f96f7d..19c2e2b6 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesArgs.java @@ -38,6 +38,13 @@ public Optional> googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Output localSsdCount; + + public Optional> localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="usePreemptibleExecutors") private @Nullable Output usePreemptibleExecutors; @@ -58,6 +65,7 @@ private GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesArgs(GetJobJob this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = $.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -107,6 +115,15 @@ public Builder 
googleServiceAccount(String googleServiceAccount) { return googleServiceAccount(Output.of(googleServiceAccount)); } + public Builder localSsdCount(@Nullable Output localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + + public Builder localSsdCount(Integer localSsdCount) { + return localSsdCount(Output.of(localSsdCount)); + } + public Builder usePreemptibleExecutors(@Nullable Output usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.java index 47b558c2..9a015d47 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.java @@ -37,6 +37,13 @@ public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Integer localSsdCount; + + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="usePreemptibleExecutors") private @Nullable Boolean usePreemptibleExecutors; @@ -57,6 +64,7 @@ private GetJobJobSettingsSettingsNewClusterGcpAttributes(GetJobJobSettingsSettin this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = $.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -94,6 +102,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } + public Builder localSsdCount(@Nullable Integer localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsNewClusterGcpAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsNewClusterGcpAttributesArgs.java index ea69ebe6..6743741b 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsNewClusterGcpAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsNewClusterGcpAttributesArgs.java @@ -38,6 +38,13 @@ public Optional> googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Output localSsdCount; + + public Optional> localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="usePreemptibleExecutors") private @Nullable Output usePreemptibleExecutors; @@ -58,6 +65,7 @@ private GetJobJobSettingsSettingsNewClusterGcpAttributesArgs(GetJobJobSettingsSe this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = $.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -107,6 +115,15 @@ public Builder googleServiceAccount(String googleServiceAccount) { return googleServiceAccount(Output.of(googleServiceAccount)); } + public Builder localSsdCount(@Nullable Output localSsdCount) { + $.localSsdCount = 
localSsdCount; + return this; + } + + public Builder localSsdCount(Integer localSsdCount) { + return localSsdCount(Output.of(localSsdCount)); + } + public Builder usePreemptibleExecutors(@Nullable Output usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsParameter.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsParameter.java new file mode 100644 index 00000000..23b04ca8 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsParameter.java @@ -0,0 +1,85 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetJobJobSettingsSettingsParameter extends com.pulumi.resources.InvokeArgs { + + public static final GetJobJobSettingsSettingsParameter Empty = new GetJobJobSettingsSettingsParameter(); + + @Import(name="default") + private @Nullable String default_; + + public Optional default_() { + return Optional.ofNullable(this.default_); + } + + /** + * the job name of databricks.Job if the resource was matched by id. + * + */ + @Import(name="name") + private @Nullable String name; + + /** + * @return the job name of databricks.Job if the resource was matched by id. + * + */ + public Optional name() { + return Optional.ofNullable(this.name); + } + + private GetJobJobSettingsSettingsParameter() {} + + private GetJobJobSettingsSettingsParameter(GetJobJobSettingsSettingsParameter $) { + this.default_ = $.default_; + this.name = $.name; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsParameter defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsParameter $; + + public Builder() { + $ = new GetJobJobSettingsSettingsParameter(); + } + + public Builder(GetJobJobSettingsSettingsParameter defaults) { + $ = new GetJobJobSettingsSettingsParameter(Objects.requireNonNull(defaults)); + } + + public Builder default_(@Nullable String default_) { + $.default_ = default_; + return this; + } + + /** + * @param name the job name of databricks.Job if the resource was matched by id. + * + * @return builder + * + */ + public Builder name(@Nullable String name) { + $.name = name; + return this; + } + + public GetJobJobSettingsSettingsParameter build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsParameterArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsParameterArgs.java new file mode 100644 index 00000000..55bcf708 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsParameterArgs.java @@ -0,0 +1,100 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetJobJobSettingsSettingsParameterArgs extends com.pulumi.resources.ResourceArgs { + + public static final GetJobJobSettingsSettingsParameterArgs Empty = new GetJobJobSettingsSettingsParameterArgs(); + + @Import(name="default") + private @Nullable Output default_; + + public Optional> default_() { + return Optional.ofNullable(this.default_); + } + + /** + * the job name of databricks.Job if the resource was matched by id. + * + */ + @Import(name="name") + private @Nullable Output name; + + /** + * @return the job name of databricks.Job if the resource was matched by id. + * + */ + public Optional> name() { + return Optional.ofNullable(this.name); + } + + private GetJobJobSettingsSettingsParameterArgs() {} + + private GetJobJobSettingsSettingsParameterArgs(GetJobJobSettingsSettingsParameterArgs $) { + this.default_ = $.default_; + this.name = $.name; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsParameterArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsParameterArgs $; + + public Builder() { + $ = new GetJobJobSettingsSettingsParameterArgs(); + } + + public Builder(GetJobJobSettingsSettingsParameterArgs defaults) { + $ = new GetJobJobSettingsSettingsParameterArgs(Objects.requireNonNull(defaults)); + } + + public Builder default_(@Nullable Output default_) { + $.default_ = default_; + return this; + } + + public Builder default_(String default_) { + return default_(Output.of(default_)); + } + + /** + * @param name the job name of databricks.Job if the resource was matched by id. + * + * @return builder + * + */ + public Builder name(@Nullable Output name) { + $.name = name; + return this; + } + + /** + * @param name the job name of databricks.Job if the resource was matched by id. + * + * @return builder + * + */ + public Builder name(String name) { + return name(Output.of(name)); + } + + public GetJobJobSettingsSettingsParameterArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTask.java new file mode 100644 index 00000000..3b976b9c --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTask.java @@ -0,0 +1,74 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.annotations.Import; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetJobJobSettingsSettingsRunJobTask extends com.pulumi.resources.InvokeArgs { + + public static final GetJobJobSettingsSettingsRunJobTask Empty = new GetJobJobSettingsSettingsRunJobTask(); + + @Import(name="jobId", required=true) + private String jobId; + + public String jobId() { + return this.jobId; + } + + @Import(name="jobParameters") + private @Nullable Map jobParameters; + + public Optional> jobParameters() { + return Optional.ofNullable(this.jobParameters); + } + + private GetJobJobSettingsSettingsRunJobTask() {} + + private GetJobJobSettingsSettingsRunJobTask(GetJobJobSettingsSettingsRunJobTask $) { + this.jobId = $.jobId; + this.jobParameters = $.jobParameters; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsRunJobTask defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsRunJobTask $; + + public Builder() { + $ = new GetJobJobSettingsSettingsRunJobTask(); + } + + public Builder(GetJobJobSettingsSettingsRunJobTask defaults) { + $ = new GetJobJobSettingsSettingsRunJobTask(Objects.requireNonNull(defaults)); + } + + public Builder jobId(String jobId) { + $.jobId = jobId; + return this; + } + + public Builder jobParameters(@Nullable Map jobParameters) { + $.jobParameters = jobParameters; + return this; + } + + public GetJobJobSettingsSettingsRunJobTask build() { + $.jobId = Objects.requireNonNull($.jobId, "expected parameter 'jobId' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTaskArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTaskArgs.java new file mode 100644 index 00000000..514e90bb --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsRunJobTaskArgs.java @@ -0,0 +1,83 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetJobJobSettingsSettingsRunJobTaskArgs extends com.pulumi.resources.ResourceArgs { + + public static final GetJobJobSettingsSettingsRunJobTaskArgs Empty = new GetJobJobSettingsSettingsRunJobTaskArgs(); + + @Import(name="jobId", required=true) + private Output jobId; + + public Output jobId() { + return this.jobId; + } + + @Import(name="jobParameters") + private @Nullable Output> jobParameters; + + public Optional>> jobParameters() { + return Optional.ofNullable(this.jobParameters); + } + + private GetJobJobSettingsSettingsRunJobTaskArgs() {} + + private GetJobJobSettingsSettingsRunJobTaskArgs(GetJobJobSettingsSettingsRunJobTaskArgs $) { + this.jobId = $.jobId; + this.jobParameters = $.jobParameters; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsRunJobTaskArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsRunJobTaskArgs $; + + public Builder() { + $ = new GetJobJobSettingsSettingsRunJobTaskArgs(); + } + + public Builder(GetJobJobSettingsSettingsRunJobTaskArgs defaults) { + $ = new GetJobJobSettingsSettingsRunJobTaskArgs(Objects.requireNonNull(defaults)); + } + + public Builder jobId(Output jobId) { + $.jobId = jobId; + return this; + } + + public Builder jobId(String jobId) { + return jobId(Output.of(jobId)); + } + + public Builder jobParameters(@Nullable Output> jobParameters) { + $.jobParameters = jobParameters; + return this; + } + + public Builder jobParameters(Map jobParameters) { + return jobParameters(Output.of(jobParameters)); + } + + public GetJobJobSettingsSettingsRunJobTaskArgs build() { + $.jobId = Objects.requireNonNull($.jobId, "expected parameter 'jobId' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTask.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTask.java index 56bc8949..a5a54387 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTask.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTask.java @@ -15,6 +15,7 @@ import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskNotificationSettings; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskPipelineTask; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskPythonWheelTask; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskRunJobTask; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskSparkJarTask; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskSparkPythonTask; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskSparkSubmitTask; @@ -165,6 +166,13 @@ public Optional runIf() { return Optional.ofNullable(this.runIf); } + @Import(name="runJobTask") + private @Nullable GetJobJobSettingsSettingsTaskRunJobTask runJobTask; + + public Optional runJobTask() { + return Optional.ofNullable(this.runJobTask); + } + @Import(name="sparkJarTask") private @Nullable GetJobJobSettingsSettingsTaskSparkJarTask sparkJarTask; @@ -229,6 +237,7 @@ private 
GetJobJobSettingsSettingsTask(GetJobJobSettingsSettingsTask $) { this.pythonWheelTask = $.pythonWheelTask; this.retryOnTimeout = $.retryOnTimeout; this.runIf = $.runIf; + this.runJobTask = $.runJobTask; this.sparkJarTask = $.sparkJarTask; this.sparkPythonTask = $.sparkPythonTask; this.sparkSubmitTask = $.sparkSubmitTask; @@ -358,6 +367,11 @@ public Builder runIf(@Nullable String runIf) { return this; } + public Builder runJobTask(@Nullable GetJobJobSettingsSettingsTaskRunJobTask runJobTask) { + $.runJobTask = runJobTask; + return this; + } + public Builder sparkJarTask(@Nullable GetJobJobSettingsSettingsTaskSparkJarTask sparkJarTask) { $.sparkJarTask = sparkJarTask; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskArgs.java index 4d0df602..68be981d 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskArgs.java @@ -16,6 +16,7 @@ import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskNotificationSettingsArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskPipelineTaskArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskPythonWheelTaskArgs; +import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskRunJobTaskArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskSparkJarTaskArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskSparkPythonTaskArgs; import com.pulumi.databricks.inputs.GetJobJobSettingsSettingsTaskSparkSubmitTaskArgs; @@ -166,6 +167,13 @@ public Optional> runIf() { return Optional.ofNullable(this.runIf); } + @Import(name="runJobTask") + private @Nullable Output runJobTask; + + public Optional> runJobTask() { + return Optional.ofNullable(this.runJobTask); + } + @Import(name="sparkJarTask") private @Nullable Output sparkJarTask; @@ -230,6 +238,7 @@ private GetJobJobSettingsSettingsTaskArgs(GetJobJobSettingsSettingsTaskArgs $) { this.pythonWheelTask = $.pythonWheelTask; this.retryOnTimeout = $.retryOnTimeout; this.runIf = $.runIf; + this.runJobTask = $.runJobTask; this.sparkJarTask = $.sparkJarTask; this.sparkPythonTask = $.sparkPythonTask; this.sparkSubmitTask = $.sparkSubmitTask; @@ -435,6 +444,15 @@ public Builder runIf(String runIf) { return runIf(Output.of(runIf)); } + public Builder runJobTask(@Nullable Output runJobTask) { + $.runJobTask = runJobTask; + return this; + } + + public Builder runJobTask(GetJobJobSettingsSettingsTaskRunJobTaskArgs runJobTask) { + return runJobTask(Output.of(runJobTask)); + } + public Builder sparkJarTask(@Nullable Output sparkJarTask) { $.sparkJarTask = sparkJarTask; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.java index 7b83f834..464c8005 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.java @@ -37,6 +37,13 @@ public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Integer localSsdCount; + + public Optional 
localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="usePreemptibleExecutors") private @Nullable Boolean usePreemptibleExecutors; @@ -57,6 +64,7 @@ private GetJobJobSettingsSettingsTaskNewClusterGcpAttributes(GetJobJobSettingsSe this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = $.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -94,6 +102,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } + public Builder localSsdCount(@Nullable Integer localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs.java index 90873288..91deeee5 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs.java @@ -38,6 +38,13 @@ public Optional> googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Output localSsdCount; + + public Optional> localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="usePreemptibleExecutors") private @Nullable Output usePreemptibleExecutors; @@ -58,6 +65,7 @@ private GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs(GetJobJobSettin this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = $.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -107,6 +115,15 @@ public Builder googleServiceAccount(String googleServiceAccount) { return googleServiceAccount(Output.of(googleServiceAccount)); } + public Builder localSsdCount(@Nullable Output localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + + public Builder localSsdCount(Integer localSsdCount) { + return localSsdCount(Output.of(localSsdCount)); + } + public Builder usePreemptibleExecutors(@Nullable Output usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTask.java new file mode 100644 index 00000000..12ce6d88 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTask.java @@ -0,0 +1,74 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.annotations.Import; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetJobJobSettingsSettingsTaskRunJobTask extends com.pulumi.resources.InvokeArgs { + + public static final GetJobJobSettingsSettingsTaskRunJobTask Empty = new GetJobJobSettingsSettingsTaskRunJobTask(); + + @Import(name="jobId", required=true) + private String jobId; + + public String jobId() { + return this.jobId; + } + + @Import(name="jobParameters") + private @Nullable Map jobParameters; + + public Optional> jobParameters() { + return Optional.ofNullable(this.jobParameters); + } + + private GetJobJobSettingsSettingsTaskRunJobTask() {} + + private GetJobJobSettingsSettingsTaskRunJobTask(GetJobJobSettingsSettingsTaskRunJobTask $) { + this.jobId = $.jobId; + this.jobParameters = $.jobParameters; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsTaskRunJobTask defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsTaskRunJobTask $; + + public Builder() { + $ = new GetJobJobSettingsSettingsTaskRunJobTask(); + } + + public Builder(GetJobJobSettingsSettingsTaskRunJobTask defaults) { + $ = new GetJobJobSettingsSettingsTaskRunJobTask(Objects.requireNonNull(defaults)); + } + + public Builder jobId(String jobId) { + $.jobId = jobId; + return this; + } + + public Builder jobParameters(@Nullable Map jobParameters) { + $.jobParameters = jobParameters; + return this; + } + + public GetJobJobSettingsSettingsTaskRunJobTask build() { + $.jobId = Objects.requireNonNull($.jobId, "expected parameter 'jobId' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.java new file mode 100644 index 00000000..8034990c --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/GetJobJobSettingsSettingsTaskRunJobTaskArgs.java @@ -0,0 +1,83 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetJobJobSettingsSettingsTaskRunJobTaskArgs extends com.pulumi.resources.ResourceArgs { + + public static final GetJobJobSettingsSettingsTaskRunJobTaskArgs Empty = new GetJobJobSettingsSettingsTaskRunJobTaskArgs(); + + @Import(name="jobId", required=true) + private Output jobId; + + public Output jobId() { + return this.jobId; + } + + @Import(name="jobParameters") + private @Nullable Output> jobParameters; + + public Optional>> jobParameters() { + return Optional.ofNullable(this.jobParameters); + } + + private GetJobJobSettingsSettingsTaskRunJobTaskArgs() {} + + private GetJobJobSettingsSettingsTaskRunJobTaskArgs(GetJobJobSettingsSettingsTaskRunJobTaskArgs $) { + this.jobId = $.jobId; + this.jobParameters = $.jobParameters; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetJobJobSettingsSettingsTaskRunJobTaskArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetJobJobSettingsSettingsTaskRunJobTaskArgs $; + + public Builder() { + $ = new GetJobJobSettingsSettingsTaskRunJobTaskArgs(); + } + + public Builder(GetJobJobSettingsSettingsTaskRunJobTaskArgs defaults) { + $ = new GetJobJobSettingsSettingsTaskRunJobTaskArgs(Objects.requireNonNull(defaults)); + } + + public Builder jobId(Output jobId) { + $.jobId = jobId; + return this; + } + + public Builder jobId(String jobId) { + return jobId(Output.of(jobId)); + } + + public Builder jobParameters(@Nullable Output> jobParameters) { + $.jobParameters = jobParameters; + return this; + } + + public Builder jobParameters(Map jobParameters) { + return jobParameters(Output.of(jobParameters)); + } + + public GetJobJobSettingsSettingsTaskRunJobTaskArgs build() { + $.jobId = Objects.requireNonNull($.jobId, "expected parameter 'jobId' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/InstancePoolAzureAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/InstancePoolAzureAttributesArgs.java index 91da300a..6b6bff4e 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/InstancePoolAzureAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/InstancePoolAzureAttributesArgs.java @@ -17,14 +17,14 @@ public final class InstancePoolAzureAttributesArgs extends com.pulumi.resources. public static final InstancePoolAzureAttributesArgs Empty = new InstancePoolAzureAttributesArgs(); /** - * Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. * */ @Import(name="availability") private @Nullable Output availability; /** - * @return Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * @return Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. 
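For context on the corrected `SPOT_AZURE` / `ON_DEMAND_AZURE` documentation, a minimal hypothetical sketch of an Azure-backed instance pool using these values through the Java SDK. The pool name, node type, and bid price are illustrative, and the exact set of required InstancePool fields comes from the provider schema rather than this diff.

import com.pulumi.Pulumi;
import com.pulumi.databricks.InstancePool;
import com.pulumi.databricks.InstancePoolArgs;
import com.pulumi.databricks.inputs.InstancePoolAzureAttributesArgs;

public class AzurePoolExample {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var pool = new InstancePool("spot-pool", InstancePoolArgs.builder()
                .instancePoolName("azure-spot-pool")
                .nodeTypeId("Standard_DS3_v2")                  // illustrative node type
                .idleInstanceAutoterminationMinutes(15)
                .azureAttributes(InstancePoolAzureAttributesArgs.builder()
                    .availability("SPOT_AZURE")                 // one of the documented valid values
                    .spotBidMaxPrice(-1.0)                      // -1 = pay up to the on-demand price
                    .build())
                .build());
        });
    }
}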
* */ public Optional> availability() { @@ -72,7 +72,7 @@ public Builder(InstancePoolAzureAttributesArgs defaults) { } /** - * @param availability Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * @param availability Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. * * @return builder * @@ -83,7 +83,7 @@ public Builder availability(@Nullable Output availability) { } /** - * @param availability Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * @param availability Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/InstancePoolGcpAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/InstancePoolGcpAttributesArgs.java index aaac7b83..ee8d56fe 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/InstancePoolGcpAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/InstancePoolGcpAttributesArgs.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import java.lang.Integer; import java.lang.String; import java.util.Objects; import java.util.Optional; @@ -15,17 +16,41 @@ public final class InstancePoolGcpAttributesArgs extends com.pulumi.resources.Re public static final InstancePoolGcpAttributesArgs Empty = new InstancePoolGcpAttributesArgs(); + /** + * Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * + */ @Import(name="gcpAvailability") private @Nullable Output gcpAvailability; + /** + * @return Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * + */ public Optional> gcpAvailability() { return Optional.ofNullable(this.gcpAvailability); } + /** + * Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + * + */ + @Import(name="localSsdCount") + private @Nullable Output localSsdCount; + + /** + * @return Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + * + */ + public Optional> localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + private InstancePoolGcpAttributesArgs() {} private InstancePoolGcpAttributesArgs(InstancePoolGcpAttributesArgs $) { this.gcpAvailability = $.gcpAvailability; + this.localSsdCount = $.localSsdCount; } public static Builder builder() { @@ -46,15 +71,48 @@ public Builder(InstancePoolGcpAttributesArgs defaults) { $ = new InstancePoolGcpAttributesArgs(Objects.requireNonNull(defaults)); } + /** + * @param gcpAvailability Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * + * @return builder + * + */ public Builder gcpAvailability(@Nullable Output gcpAvailability) { $.gcpAvailability = gcpAvailability; return this; } + /** + * @param gcpAvailability Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. 
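To show how the new `localSsdCount` field on InstancePoolGcpAttributesArgs might be used, a hedged sketch of a GCP instance pool that attaches one 375GB local SSD per node. The pool name and node type are made up, and required InstancePool fields are assumed from the broader provider schema.

import com.pulumi.Pulumi;
import com.pulumi.databricks.InstancePool;
import com.pulumi.databricks.InstancePoolArgs;
import com.pulumi.databricks.inputs.InstancePoolGcpAttributesArgs;

public class GcpPoolExample {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var pool = new InstancePool("gcp-pool", InstancePoolArgs.builder()
                .instancePoolName("gcp-ssd-pool")
                .nodeTypeId("n2-highmem-4")                     // illustrative node type
                .idleInstanceAutoterminationMinutes(15)
                .gcpAttributes(InstancePoolGcpAttributesArgs.builder()
                    .gcpAvailability("ON_DEMAND_GCP")
                    .localSsdCount(1)                           // new field added in this change
                    .build())
                .build());
        });
    }
}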
+ * + * @return builder + * + */ public Builder gcpAvailability(String gcpAvailability) { return gcpAvailability(Output.of(gcpAvailability)); } + /** + * @param localSsdCount Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + * + * @return builder + * + */ + public Builder localSsdCount(@Nullable Output localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + + /** + * @param localSsdCount Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + * + * @return builder + * + */ + public Builder localSsdCount(Integer localSsdCount) { + return localSsdCount(Output.of(localSsdCount)); + } + public InstancePoolGcpAttributesArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobJobClusterNewClusterGcpAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobJobClusterNewClusterGcpAttributesArgs.java index 55f2885e..13821522 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobJobClusterNewClusterGcpAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobJobClusterNewClusterGcpAttributesArgs.java @@ -38,6 +38,13 @@ public Optional> googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Output localSsdCount; + + public Optional> localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="usePreemptibleExecutors") private @Nullable Output usePreemptibleExecutors; @@ -58,6 +65,7 @@ private JobJobClusterNewClusterGcpAttributesArgs(JobJobClusterNewClusterGcpAttri this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = $.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -107,6 +115,15 @@ public Builder googleServiceAccount(String googleServiceAccount) { return googleServiceAccount(Output.of(googleServiceAccount)); } + public Builder localSsdCount(@Nullable Output localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + + public Builder localSsdCount(Integer localSsdCount) { + return localSsdCount(Output.of(localSsdCount)); + } + public Builder usePreemptibleExecutors(@Nullable Output usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobNewClusterGcpAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobNewClusterGcpAttributesArgs.java index 758ab596..2ebfdb7c 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobNewClusterGcpAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobNewClusterGcpAttributesArgs.java @@ -38,6 +38,13 @@ public Optional> googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Output localSsdCount; + + public Optional> localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="usePreemptibleExecutors") private @Nullable Output usePreemptibleExecutors; @@ -58,6 +65,7 @@ private JobNewClusterGcpAttributesArgs(JobNewClusterGcpAttributesArgs $) { this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = 
$.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -107,6 +115,15 @@ public Builder googleServiceAccount(String googleServiceAccount) { return googleServiceAccount(Output.of(googleServiceAccount)); } + public Builder localSsdCount(@Nullable Output localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + + public Builder localSsdCount(Integer localSsdCount) { + return localSsdCount(Output.of(localSsdCount)); + } + public Builder usePreemptibleExecutors(@Nullable Output usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobNotificationSettingsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobNotificationSettingsArgs.java index 85ae716c..cbce3aac 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobNotificationSettingsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobNotificationSettingsArgs.java @@ -31,14 +31,14 @@ public Optional> noAlertForCanceledRuns() { } /** - * (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * (Bool) don't send alert for skipped runs. * */ @Import(name="noAlertForSkippedRuns") private @Nullable Output noAlertForSkippedRuns; /** - * @return (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * @return (Bool) don't send alert for skipped runs. * */ public Optional> noAlertForSkippedRuns() { @@ -92,7 +92,7 @@ public Builder noAlertForCanceledRuns(Boolean noAlertForCanceledRuns) { } /** - * @param noAlertForSkippedRuns (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * @param noAlertForSkippedRuns (Bool) don't send alert for skipped runs. * * @return builder * @@ -103,7 +103,7 @@ public Builder noAlertForSkippedRuns(@Nullable Output noAlertForSkipped } /** - * @param noAlertForSkippedRuns (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * @param noAlertForSkippedRuns (Bool) don't send alert for skipped runs. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobParameterArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobParameterArgs.java new file mode 100644 index 00000000..2169393d --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobParameterArgs.java @@ -0,0 +1,120 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class JobParameterArgs extends com.pulumi.resources.ResourceArgs { + + public static final JobParameterArgs Empty = new JobParameterArgs(); + + /** + * Default value of the parameter. + * + */ + @Import(name="default") + private @Nullable Output default_; + + /** + * @return Default value of the parameter. + * + */ + public Optional> default_() { + return Optional.ofNullable(this.default_); + } + + /** + * An optional name for the job. The default value is Untitled. 
+ * + */ + @Import(name="name") + private @Nullable Output name; + + /** + * @return An optional name for the job. The default value is Untitled. + * + */ + public Optional> name() { + return Optional.ofNullable(this.name); + } + + private JobParameterArgs() {} + + private JobParameterArgs(JobParameterArgs $) { + this.default_ = $.default_; + this.name = $.name; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(JobParameterArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private JobParameterArgs $; + + public Builder() { + $ = new JobParameterArgs(); + } + + public Builder(JobParameterArgs defaults) { + $ = new JobParameterArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param default_ Default value of the parameter. + * + * @return builder + * + */ + public Builder default_(@Nullable Output default_) { + $.default_ = default_; + return this; + } + + /** + * @param default_ Default value of the parameter. + * + * @return builder + * + */ + public Builder default_(String default_) { + return default_(Output.of(default_)); + } + + /** + * @param name An optional name for the job. The default value is Untitled. + * + * @return builder + * + */ + public Builder name(@Nullable Output name) { + $.name = name; + return this; + } + + /** + * @param name An optional name for the job. The default value is Untitled. + * + * @return builder + * + */ + public Builder name(String name) { + return name(Output.of(name)); + } + + public JobParameterArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobRunJobTaskArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobRunJobTaskArgs.java new file mode 100644 index 00000000..0d1241dd --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobRunJobTaskArgs.java @@ -0,0 +1,123 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
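The new JobParameterArgs input backs job-level `parameter` blocks. A minimal sketch of how a Job might declare such parameters via the Java SDK, using the `name`/`default_` builders shown in this diff; the job name, task key, and notebook path are illustrative.

import com.pulumi.Pulumi;
import com.pulumi.databricks.Job;
import com.pulumi.databricks.JobArgs;
import com.pulumi.databricks.inputs.JobParameterArgs;
import com.pulumi.databricks.inputs.JobTaskArgs;
import com.pulumi.databricks.inputs.JobTaskNotebookTaskArgs;

public class JobParametersExample {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var job = new Job("etl", JobArgs.builder()
                .name("etl-with-parameters")
                // Job-level parameters: each entry has a name and a default value.
                .parameters(
                    JobParameterArgs.builder().name("environment").default_("dev").build(),
                    JobParameterArgs.builder().name("batch_date").default_("2023-01-01").build())
                .tasks(JobTaskArgs.builder()
                    .taskKey("main")
                    .notebookTask(JobTaskNotebookTaskArgs.builder()
                        .notebookPath("/Shared/etl")            // illustrative notebook path
                        .build())
                    .build())
                .build());
        });
    }
}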
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class JobRunJobTaskArgs extends com.pulumi.resources.ResourceArgs { + + public static final JobRunJobTaskArgs Empty = new JobRunJobTaskArgs(); + + /** + * (String) ID of the job + * + */ + @Import(name="jobId", required=true) + private Output jobId; + + /** + * @return (String) ID of the job + * + */ + public Output jobId() { + return this.jobId; + } + + /** + * (Map) Job parameters for the task + * + */ + @Import(name="jobParameters") + private @Nullable Output> jobParameters; + + /** + * @return (Map) Job parameters for the task + * + */ + public Optional>> jobParameters() { + return Optional.ofNullable(this.jobParameters); + } + + private JobRunJobTaskArgs() {} + + private JobRunJobTaskArgs(JobRunJobTaskArgs $) { + this.jobId = $.jobId; + this.jobParameters = $.jobParameters; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(JobRunJobTaskArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private JobRunJobTaskArgs $; + + public Builder() { + $ = new JobRunJobTaskArgs(); + } + + public Builder(JobRunJobTaskArgs defaults) { + $ = new JobRunJobTaskArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param jobId (String) ID of the job + * + * @return builder + * + */ + public Builder jobId(Output jobId) { + $.jobId = jobId; + return this; + } + + /** + * @param jobId (String) ID of the job + * + * @return builder + * + */ + public Builder jobId(String jobId) { + return jobId(Output.of(jobId)); + } + + /** + * @param jobParameters (Map) Job parameters for the task + * + * @return builder + * + */ + public Builder jobParameters(@Nullable Output> jobParameters) { + $.jobParameters = jobParameters; + return this; + } + + /** + * @param jobParameters (Map) Job parameters for the task + * + * @return builder + * + */ + public Builder jobParameters(Map jobParameters) { + return jobParameters(Output.of(jobParameters)); + } + + public JobRunJobTaskArgs build() { + $.jobId = Objects.requireNonNull($.jobId, "expected parameter 'jobId' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobState.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobState.java index 01d8144b..947ed0a3 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobState.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobState.java @@ -16,10 +16,12 @@ import com.pulumi.databricks.inputs.JobNewClusterArgs; import com.pulumi.databricks.inputs.JobNotebookTaskArgs; import com.pulumi.databricks.inputs.JobNotificationSettingsArgs; +import com.pulumi.databricks.inputs.JobParameterArgs; import com.pulumi.databricks.inputs.JobPipelineTaskArgs; import com.pulumi.databricks.inputs.JobPythonWheelTaskArgs; import com.pulumi.databricks.inputs.JobQueueArgs; import com.pulumi.databricks.inputs.JobRunAsArgs; +import com.pulumi.databricks.inputs.JobRunJobTaskArgs; import com.pulumi.databricks.inputs.JobScheduleArgs; import com.pulumi.databricks.inputs.JobSparkJarTaskArgs; import com.pulumi.databricks.inputs.JobSparkPythonTaskArgs; @@ -327,6 +329,13 @@ public Optional> notificationSettings() { return Optional.ofNullable(this.notificationSettings); } + 
@Import(name="parameters") + private @Nullable Output> parameters; + + public Optional>> parameters() { + return Optional.ofNullable(this.parameters); + } + @Import(name="pipelineTask") private @Nullable Output pipelineTask; @@ -370,6 +379,13 @@ public Optional> runAs() { return Optional.ofNullable(this.runAs); } + @Import(name="runJobTask") + private @Nullable Output runJobTask; + + public Optional> runJobTask() { + return Optional.ofNullable(this.runJobTask); + } + /** * (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. * @@ -494,11 +510,13 @@ private JobState(JobState $) { this.newCluster = $.newCluster; this.notebookTask = $.notebookTask; this.notificationSettings = $.notificationSettings; + this.parameters = $.parameters; this.pipelineTask = $.pipelineTask; this.pythonWheelTask = $.pythonWheelTask; this.queue = $.queue; this.retryOnTimeout = $.retryOnTimeout; this.runAs = $.runAs; + this.runJobTask = $.runJobTask; this.schedule = $.schedule; this.sparkJarTask = $.sparkJarTask; this.sparkPythonTask = $.sparkPythonTask; @@ -924,6 +942,19 @@ public Builder notificationSettings(JobNotificationSettingsArgs notificationSett return notificationSettings(Output.of(notificationSettings)); } + public Builder parameters(@Nullable Output> parameters) { + $.parameters = parameters; + return this; + } + + public Builder parameters(List parameters) { + return parameters(Output.of(parameters)); + } + + public Builder parameters(JobParameterArgs... parameters) { + return parameters(List.of(parameters)); + } + public Builder pipelineTask(@Nullable Output pipelineTask) { $.pipelineTask = pipelineTask; return this; @@ -981,6 +1012,15 @@ public Builder runAs(JobRunAsArgs runAs) { return runAs(Output.of(runAs)); } + public Builder runJobTask(@Nullable Output runJobTask) { + $.runJobTask = runJobTask; + return this; + } + + public Builder runJobTask(JobRunJobTaskArgs runJobTask) { + return runJobTask(Output.of(runJobTask)); + } + /** * @param schedule (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskArgs.java index c63277db..113b7ea8 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskArgs.java @@ -16,6 +16,7 @@ import com.pulumi.databricks.inputs.JobTaskNotificationSettingsArgs; import com.pulumi.databricks.inputs.JobTaskPipelineTaskArgs; import com.pulumi.databricks.inputs.JobTaskPythonWheelTaskArgs; +import com.pulumi.databricks.inputs.JobTaskRunJobTaskArgs; import com.pulumi.databricks.inputs.JobTaskSparkJarTaskArgs; import com.pulumi.databricks.inputs.JobTaskSparkPythonTaskArgs; import com.pulumi.databricks.inputs.JobTaskSparkSubmitTaskArgs; @@ -239,13 +240,28 @@ public Optional> retryOnTimeout() { return Optional.ofNullable(this.retryOnTimeout); } + /** + * An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. 
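To illustrate the `run_job_task` support wired into JobTaskArgs here, a hedged sketch of one job triggering another from a task. The parent/child job names and the parameter value are invented for the example; `jobId`, `jobParameters`, and `runIf` mirror the builders added in this diff.

import com.pulumi.Pulumi;
import com.pulumi.databricks.Job;
import com.pulumi.databricks.JobArgs;
import com.pulumi.databricks.inputs.JobTaskArgs;
import com.pulumi.databricks.inputs.JobTaskRunJobTaskArgs;
import java.util.Map;

public class RunJobTaskExample {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            // The job to be triggered; its own settings are elided for brevity.
            var child = new Job("child", JobArgs.builder().name("child-job").build());

            var parent = new Job("parent", JobArgs.builder()
                .name("parent-job")
                .tasks(JobTaskArgs.builder()
                    .taskKey("trigger-child")
                    .runIf("ALL_SUCCESS")                       // optional; documented default
                    .runJobTask(JobTaskRunJobTaskArgs.builder()
                        .jobId(child.id())                      // required job_id of the child job
                        .jobParameters(Map.of("environment", "prod"))
                        .build())
                    .build())
                .build());
        });
    }
}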
+ * + */ @Import(name="runIf") private @Nullable Output runIf; + /** + * @return An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + * + */ public Optional> runIf() { return Optional.ofNullable(this.runIf); } + @Import(name="runJobTask") + private @Nullable Output runJobTask; + + public Optional> runJobTask() { + return Optional.ofNullable(this.runJobTask); + } + @Import(name="sparkJarTask") private @Nullable Output sparkJarTask; @@ -328,6 +344,7 @@ private JobTaskArgs(JobTaskArgs $) { this.pythonWheelTask = $.pythonWheelTask; this.retryOnTimeout = $.retryOnTimeout; this.runIf = $.runIf; + this.runJobTask = $.runJobTask; this.sparkJarTask = $.sparkJarTask; this.sparkPythonTask = $.sparkPythonTask; this.sparkSubmitTask = $.sparkSubmitTask; @@ -656,15 +673,36 @@ public Builder retryOnTimeout(Boolean retryOnTimeout) { return retryOnTimeout(Output.of(retryOnTimeout)); } + /** + * @param runIf An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + * + * @return builder + * + */ public Builder runIf(@Nullable Output runIf) { $.runIf = runIf; return this; } + /** + * @param runIf An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + * + * @return builder + * + */ public Builder runIf(String runIf) { return runIf(Output.of(runIf)); } + public Builder runJobTask(@Nullable Output runJobTask) { + $.runJobTask = runJobTask; + return this; + } + + public Builder runJobTask(JobTaskRunJobTaskArgs runJobTask) { + return runJobTask(Output.of(runJobTask)); + } + public Builder sparkJarTask(@Nullable Output sparkJarTask) { $.sparkJarTask = sparkJarTask; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskNewClusterGcpAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskNewClusterGcpAttributesArgs.java index 4ccaa092..7c3b443a 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskNewClusterGcpAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskNewClusterGcpAttributesArgs.java @@ -38,6 +38,13 @@ public Optional> googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Output localSsdCount; + + public Optional> localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="usePreemptibleExecutors") private @Nullable Output usePreemptibleExecutors; @@ -58,6 +65,7 @@ private JobTaskNewClusterGcpAttributesArgs(JobTaskNewClusterGcpAttributesArgs $) this.availability = $.availability; this.bootDiskSize = $.bootDiskSize; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.usePreemptibleExecutors = $.usePreemptibleExecutors; this.zoneId = $.zoneId; } @@ -107,6 +115,15 @@ public Builder googleServiceAccount(String googleServiceAccount) { return googleServiceAccount(Output.of(googleServiceAccount)); } + public Builder localSsdCount(@Nullable Output localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + + public Builder localSsdCount(Integer localSsdCount) { + return localSsdCount(Output.of(localSsdCount)); + } + public Builder usePreemptibleExecutors(@Nullable Output 
usePreemptibleExecutors) { $.usePreemptibleExecutors = usePreemptibleExecutors; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskNotificationSettingsArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskNotificationSettingsArgs.java index 1cb2dc99..07a61eb9 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskNotificationSettingsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskNotificationSettingsArgs.java @@ -46,14 +46,14 @@ public Optional> noAlertForCanceledRuns() { } /** - * (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * (Bool) don't send alert for skipped runs. * */ @Import(name="noAlertForSkippedRuns") private @Nullable Output noAlertForSkippedRuns; /** - * @return (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * @return (Bool) don't send alert for skipped runs. * */ public Optional> noAlertForSkippedRuns() { @@ -129,7 +129,7 @@ public Builder noAlertForCanceledRuns(Boolean noAlertForCanceledRuns) { } /** - * @param noAlertForSkippedRuns (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * @param noAlertForSkippedRuns (Bool) don't send alert for skipped runs. * * @return builder * @@ -140,7 +140,7 @@ public Builder noAlertForSkippedRuns(@Nullable Output noAlertForSkipped } /** - * @param noAlertForSkippedRuns (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * @param noAlertForSkippedRuns (Bool) don't send alert for skipped runs. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskRunJobTaskArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskRunJobTaskArgs.java new file mode 100644 index 00000000..80ebb28a --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/JobTaskRunJobTaskArgs.java @@ -0,0 +1,123 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class JobTaskRunJobTaskArgs extends com.pulumi.resources.ResourceArgs { + + public static final JobTaskRunJobTaskArgs Empty = new JobTaskRunJobTaskArgs(); + + /** + * (String) ID of the job + * + */ + @Import(name="jobId", required=true) + private Output jobId; + + /** + * @return (String) ID of the job + * + */ + public Output jobId() { + return this.jobId; + } + + /** + * (Map) Job parameters for the task + * + */ + @Import(name="jobParameters") + private @Nullable Output> jobParameters; + + /** + * @return (Map) Job parameters for the task + * + */ + public Optional>> jobParameters() { + return Optional.ofNullable(this.jobParameters); + } + + private JobTaskRunJobTaskArgs() {} + + private JobTaskRunJobTaskArgs(JobTaskRunJobTaskArgs $) { + this.jobId = $.jobId; + this.jobParameters = $.jobParameters; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(JobTaskRunJobTaskArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private JobTaskRunJobTaskArgs $; + + public Builder() { + $ = new JobTaskRunJobTaskArgs(); + } + + public Builder(JobTaskRunJobTaskArgs defaults) { + $ = new JobTaskRunJobTaskArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param jobId (String) ID of the job + * + * @return builder + * + */ + public Builder jobId(Output jobId) { + $.jobId = jobId; + return this; + } + + /** + * @param jobId (String) ID of the job + * + * @return builder + * + */ + public Builder jobId(String jobId) { + return jobId(Output.of(jobId)); + } + + /** + * @param jobParameters (Map) Job parameters for the task + * + * @return builder + * + */ + public Builder jobParameters(@Nullable Output> jobParameters) { + $.jobParameters = jobParameters; + return this; + } + + /** + * @param jobParameters (Map) Job parameters for the task + * + * @return builder + * + */ + public Builder jobParameters(Map jobParameters) { + return jobParameters(Output.of(jobParameters)); + } + + public JobTaskRunJobTaskArgs build() { + $.jobId = Objects.requireNonNull($.jobId, "expected parameter 'jobId' to be non-null"); + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ModelServingConfigServedModelArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ModelServingConfigServedModelArgs.java index 5be5a44f..24b37a1c 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/ModelServingConfigServedModelArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/ModelServingConfigServedModelArgs.java @@ -25,6 +25,13 @@ public Optional>> environmentVars() { return Optional.ofNullable(this.environmentVars); } + @Import(name="instanceProfileArn") + private @Nullable Output instanceProfileArn; + + public Optional> instanceProfileArn() { + return Optional.ofNullable(this.instanceProfileArn); + } + /** * The name of the model in Databricks Model Registry to be served. 
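A sketch of how the new `instanceProfileArn` input on served models might be set on a model serving endpoint. The endpoint and model names, version, workload size, and ARN are all illustrative, and the surrounding required fields are assumed from the provider schema rather than this diff.

import com.pulumi.Pulumi;
import com.pulumi.databricks.ModelServing;
import com.pulumi.databricks.ModelServingArgs;
import com.pulumi.databricks.inputs.ModelServingConfigArgs;
import com.pulumi.databricks.inputs.ModelServingConfigServedModelArgs;

public class ServingEndpointExample {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var endpoint = new ModelServing("endpoint", ModelServingArgs.builder()
                .name("ads-serving-endpoint")
                .config(ModelServingConfigArgs.builder()
                    .servedModels(ModelServingConfigServedModelArgs.builder()
                        .name("prod_model")
                        .modelName("ads1")                      // registered model name
                        .modelVersion("2")
                        .workloadSize("Small")
                        .scaleToZeroEnabled(true)
                        // New: attach an instance profile so the served model can reach AWS resources.
                        .instanceProfileArn("arn:aws:iam::123456789012:instance-profile/model-serving")
                        .build())
                    .build())
                .build());
        });
    }
}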
* @@ -104,6 +111,7 @@ private ModelServingConfigServedModelArgs() {} private ModelServingConfigServedModelArgs(ModelServingConfigServedModelArgs $) { this.environmentVars = $.environmentVars; + this.instanceProfileArn = $.instanceProfileArn; this.modelName = $.modelName; this.modelVersion = $.modelVersion; this.name = $.name; @@ -138,6 +146,15 @@ public Builder environmentVars(Map environmentVars) { return environmentVars(Output.of(environmentVars)); } + public Builder instanceProfileArn(@Nullable Output instanceProfileArn) { + $.instanceProfileArn = instanceProfileArn; + return this; + } + + public Builder instanceProfileArn(String instanceProfileArn) { + return instanceProfileArn(Output.of(instanceProfileArn)); + } + /** * @param modelName The name of the model in Databricks Model Registry to be served. * diff --git a/sdk/java/src/main/java/com/pulumi/databricks/inputs/PipelineClusterGcpAttributesArgs.java b/sdk/java/src/main/java/com/pulumi/databricks/inputs/PipelineClusterGcpAttributesArgs.java index 739842a6..b171f7b7 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/inputs/PipelineClusterGcpAttributesArgs.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/inputs/PipelineClusterGcpAttributesArgs.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import java.lang.Integer; import java.lang.String; import java.util.Objects; import java.util.Optional; @@ -29,6 +30,13 @@ public Optional> googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + @Import(name="localSsdCount") + private @Nullable Output localSsdCount; + + public Optional> localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + @Import(name="zoneId") private @Nullable Output zoneId; @@ -41,6 +49,7 @@ private PipelineClusterGcpAttributesArgs() {} private PipelineClusterGcpAttributesArgs(PipelineClusterGcpAttributesArgs $) { this.availability = $.availability; this.googleServiceAccount = $.googleServiceAccount; + this.localSsdCount = $.localSsdCount; this.zoneId = $.zoneId; } @@ -80,6 +89,15 @@ public Builder googleServiceAccount(String googleServiceAccount) { return googleServiceAccount(Output.of(googleServiceAccount)); } + public Builder localSsdCount(@Nullable Output localSsdCount) { + $.localSsdCount = localSsdCount; + return this; + } + + public Builder localSsdCount(Integer localSsdCount) { + return localSsdCount(Output.of(localSsdCount)); + } + public Builder zoneId(@Nullable Output zoneId) { $.zoneId = zoneId; return this; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/ClusterGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/ClusterGcpAttributes.java index 808c715f..eeef45bc 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/ClusterGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/ClusterGcpAttributes.java @@ -16,6 +16,13 @@ public final class ClusterGcpAttributes { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; + /** + * @deprecated + * Please use 'availability' instead. + * + */ + @Deprecated /* Please use 'availability' instead. 
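The `localSsdCount` field is added across the GCP attribute types (clusters, pipelines, jobs, and instance pools). A hedged sketch of an interactive cluster using it, assuming the matching input type ClusterGcpAttributesArgs gains the same field as the output type shown here; the runtime version, node type, and sizes are illustrative.

import com.pulumi.Pulumi;
import com.pulumi.databricks.Cluster;
import com.pulumi.databricks.ClusterArgs;
import com.pulumi.databricks.inputs.ClusterGcpAttributesArgs;

public class GcpClusterExample {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var cluster = new Cluster("gcp-cluster", ClusterArgs.builder()
                .clusterName("ssd-backed-cluster")
                .sparkVersion("13.3.x-scala2.12")               // illustrative runtime
                .nodeTypeId("n2-highmem-4")
                .numWorkers(2)
                .autoterminationMinutes(20)
                .gcpAttributes(ClusterGcpAttributesArgs.builder()
                    .availability("ON_DEMAND_GCP")
                    .localSsdCount(1)                           // one 375GB local SSD per node
                    .build())
                .build());
        });
    }
}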
*/ private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; @@ -29,6 +36,15 @@ public Optional bootDiskSize() { public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } + /** + * @deprecated + * Please use 'availability' instead. + * + */ + @Deprecated /* Please use 'availability' instead. */ public Optional usePreemptibleExecutors() { return Optional.ofNullable(this.usePreemptibleExecutors); } @@ -48,6 +64,7 @@ public static final class Builder { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; public Builder() {} @@ -56,6 +73,7 @@ public Builder(ClusterGcpAttributes defaults) { this.availability = defaults.availability; this.bootDiskSize = defaults.bootDiskSize; this.googleServiceAccount = defaults.googleServiceAccount; + this.localSsdCount = defaults.localSsdCount; this.usePreemptibleExecutors = defaults.usePreemptibleExecutors; this.zoneId = defaults.zoneId; } @@ -76,6 +94,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } @CustomType.Setter + public Builder localSsdCount(@Nullable Integer localSsdCount) { + this.localSsdCount = localSsdCount; + return this; + } + @CustomType.Setter public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { this.usePreemptibleExecutors = usePreemptibleExecutors; return this; @@ -90,6 +113,7 @@ public ClusterGcpAttributes build() { o.availability = availability; o.bootDiskSize = bootDiskSize; o.googleServiceAccount = googleServiceAccount; + o.localSsdCount = localSsdCount; o.usePreemptibleExecutors = usePreemptibleExecutors; o.zoneId = zoneId; return o; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetClusterClusterInfoGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetClusterClusterInfoGcpAttributes.java index 21bcbcf2..95de53e4 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetClusterClusterInfoGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetClusterClusterInfoGcpAttributes.java @@ -16,6 +16,7 @@ public final class GetClusterClusterInfoGcpAttributes { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; @@ -29,6 +30,9 @@ public Optional bootDiskSize() { public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } public Optional usePreemptibleExecutors() { return Optional.ofNullable(this.usePreemptibleExecutors); } @@ -48,6 +52,7 @@ public static final class Builder { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; public Builder() {} @@ -56,6 +61,7 @@ public Builder(GetClusterClusterInfoGcpAttributes defaults) { this.availability = defaults.availability; this.bootDiskSize = defaults.bootDiskSize; 
this.googleServiceAccount = defaults.googleServiceAccount; + this.localSsdCount = defaults.localSsdCount; this.usePreemptibleExecutors = defaults.usePreemptibleExecutors; this.zoneId = defaults.zoneId; } @@ -76,6 +82,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } @CustomType.Setter + public Builder localSsdCount(@Nullable Integer localSsdCount) { + this.localSsdCount = localSsdCount; + return this; + } + @CustomType.Setter public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { this.usePreemptibleExecutors = usePreemptibleExecutors; return this; @@ -90,6 +101,7 @@ public GetClusterClusterInfoGcpAttributes build() { o.availability = availability; o.bootDiskSize = bootDiskSize; o.googleServiceAccount = googleServiceAccount; + o.localSsdCount = localSsdCount; o.usePreemptibleExecutors = usePreemptibleExecutors; o.zoneId = zoneId; return o; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetInstancePoolPoolInfoGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetInstancePoolPoolInfoGcpAttributes.java index 13aae3a7..55a0b8af 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetInstancePoolPoolInfoGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetInstancePoolPoolInfoGcpAttributes.java @@ -4,6 +4,7 @@ package com.pulumi.databricks.outputs; import com.pulumi.core.annotations.CustomType; +import java.lang.Integer; import java.lang.String; import java.util.Objects; import java.util.Optional; @@ -12,11 +13,15 @@ @CustomType public final class GetInstancePoolPoolInfoGcpAttributes { private @Nullable String gcpAvailability; + private @Nullable Integer localSsdCount; private GetInstancePoolPoolInfoGcpAttributes() {} public Optional gcpAvailability() { return Optional.ofNullable(this.gcpAvailability); } + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } public static Builder builder() { return new Builder(); @@ -28,10 +33,12 @@ public static Builder builder(GetInstancePoolPoolInfoGcpAttributes defaults) { @CustomType.Builder public static final class Builder { private @Nullable String gcpAvailability; + private @Nullable Integer localSsdCount; public Builder() {} public Builder(GetInstancePoolPoolInfoGcpAttributes defaults) { Objects.requireNonNull(defaults); this.gcpAvailability = defaults.gcpAvailability; + this.localSsdCount = defaults.localSsdCount; } @CustomType.Setter @@ -39,9 +46,15 @@ public Builder gcpAvailability(@Nullable String gcpAvailability) { this.gcpAvailability = gcpAvailability; return this; } + @CustomType.Setter + public Builder localSsdCount(@Nullable Integer localSsdCount) { + this.localSsdCount = localSsdCount; + return this; + } public GetInstancePoolPoolInfoGcpAttributes build() { final var o = new GetInstancePoolPoolInfoGcpAttributes(); o.gcpAvailability = gcpAvailability; + o.localSsdCount = localSsdCount; return o; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettings.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettings.java index 9a262520..5a4deb6c 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettings.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettings.java @@ -15,10 +15,12 @@ import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsNewCluster; import 
com.pulumi.databricks.outputs.GetJobJobSettingsSettingsNotebookTask; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsNotificationSettings; +import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsParameter; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsPipelineTask; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsPythonWheelTask; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsQueue; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsRunAs; +import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsRunJobTask; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsSchedule; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsSparkJarTask; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsSparkPythonTask; @@ -59,11 +61,13 @@ public final class GetJobJobSettingsSettings { private @Nullable GetJobJobSettingsSettingsNewCluster newCluster; private @Nullable GetJobJobSettingsSettingsNotebookTask notebookTask; private @Nullable GetJobJobSettingsSettingsNotificationSettings notificationSettings; + private @Nullable List parameters; private @Nullable GetJobJobSettingsSettingsPipelineTask pipelineTask; private @Nullable GetJobJobSettingsSettingsPythonWheelTask pythonWheelTask; private @Nullable GetJobJobSettingsSettingsQueue queue; private @Nullable Boolean retryOnTimeout; private @Nullable GetJobJobSettingsSettingsRunAs runAs; + private @Nullable GetJobJobSettingsSettingsRunJobTask runJobTask; private @Nullable GetJobJobSettingsSettingsSchedule schedule; private @Nullable GetJobJobSettingsSettingsSparkJarTask sparkJarTask; private @Nullable GetJobJobSettingsSettingsSparkPythonTask sparkPythonTask; @@ -130,6 +134,9 @@ public Optional notebookTask() { public Optional notificationSettings() { return Optional.ofNullable(this.notificationSettings); } + public List parameters() { + return this.parameters == null ? 
List.of() : this.parameters; + } public Optional pipelineTask() { return Optional.ofNullable(this.pipelineTask); } @@ -145,6 +152,9 @@ public Optional retryOnTimeout() { public Optional runAs() { return Optional.ofNullable(this.runAs); } + public Optional runJobTask() { + return Optional.ofNullable(this.runJobTask); + } public Optional schedule() { return Optional.ofNullable(this.schedule); } @@ -199,11 +209,13 @@ public static final class Builder { private @Nullable GetJobJobSettingsSettingsNewCluster newCluster; private @Nullable GetJobJobSettingsSettingsNotebookTask notebookTask; private @Nullable GetJobJobSettingsSettingsNotificationSettings notificationSettings; + private @Nullable List parameters; private @Nullable GetJobJobSettingsSettingsPipelineTask pipelineTask; private @Nullable GetJobJobSettingsSettingsPythonWheelTask pythonWheelTask; private @Nullable GetJobJobSettingsSettingsQueue queue; private @Nullable Boolean retryOnTimeout; private @Nullable GetJobJobSettingsSettingsRunAs runAs; + private @Nullable GetJobJobSettingsSettingsRunJobTask runJobTask; private @Nullable GetJobJobSettingsSettingsSchedule schedule; private @Nullable GetJobJobSettingsSettingsSparkJarTask sparkJarTask; private @Nullable GetJobJobSettingsSettingsSparkPythonTask sparkPythonTask; @@ -233,11 +245,13 @@ public Builder(GetJobJobSettingsSettings defaults) { this.newCluster = defaults.newCluster; this.notebookTask = defaults.notebookTask; this.notificationSettings = defaults.notificationSettings; + this.parameters = defaults.parameters; this.pipelineTask = defaults.pipelineTask; this.pythonWheelTask = defaults.pythonWheelTask; this.queue = defaults.queue; this.retryOnTimeout = defaults.retryOnTimeout; this.runAs = defaults.runAs; + this.runJobTask = defaults.runJobTask; this.schedule = defaults.schedule; this.sparkJarTask = defaults.sparkJarTask; this.sparkPythonTask = defaults.sparkPythonTask; @@ -344,6 +358,14 @@ public Builder notificationSettings(@Nullable GetJobJobSettingsSettingsNotificat return this; } @CustomType.Setter + public Builder parameters(@Nullable List parameters) { + this.parameters = parameters; + return this; + } + public Builder parameters(GetJobJobSettingsSettingsParameter... 
parameters) { + return parameters(List.of(parameters)); + } + @CustomType.Setter public Builder pipelineTask(@Nullable GetJobJobSettingsSettingsPipelineTask pipelineTask) { this.pipelineTask = pipelineTask; return this; @@ -369,6 +391,11 @@ public Builder runAs(@Nullable GetJobJobSettingsSettingsRunAs runAs) { return this; } @CustomType.Setter + public Builder runJobTask(@Nullable GetJobJobSettingsSettingsRunJobTask runJobTask) { + this.runJobTask = runJobTask; + return this; + } + @CustomType.Setter public Builder schedule(@Nullable GetJobJobSettingsSettingsSchedule schedule) { this.schedule = schedule; return this; @@ -435,11 +462,13 @@ public GetJobJobSettingsSettings build() { o.newCluster = newCluster; o.notebookTask = notebookTask; o.notificationSettings = notificationSettings; + o.parameters = parameters; o.pipelineTask = pipelineTask; o.pythonWheelTask = pythonWheelTask; o.queue = queue; o.retryOnTimeout = retryOnTimeout; o.runAs = runAs; + o.runJobTask = runJobTask; o.schedule = schedule; o.sparkJarTask = sparkJarTask; o.sparkPythonTask = sparkPythonTask; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.java index 8615113e..4d71194e 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes.java @@ -16,6 +16,7 @@ public final class GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; @@ -29,6 +30,9 @@ public Optional bootDiskSize() { public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } public Optional usePreemptibleExecutors() { return Optional.ofNullable(this.usePreemptibleExecutors); } @@ -48,6 +52,7 @@ public static final class Builder { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; public Builder() {} @@ -56,6 +61,7 @@ public Builder(GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes defaul this.availability = defaults.availability; this.bootDiskSize = defaults.bootDiskSize; this.googleServiceAccount = defaults.googleServiceAccount; + this.localSsdCount = defaults.localSsdCount; this.usePreemptibleExecutors = defaults.usePreemptibleExecutors; this.zoneId = defaults.zoneId; } @@ -76,6 +82,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } @CustomType.Setter + public Builder localSsdCount(@Nullable Integer localSsdCount) { + this.localSsdCount = localSsdCount; + return this; + } + @CustomType.Setter public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { this.usePreemptibleExecutors = usePreemptibleExecutors; return this; @@ -90,6 +101,7 @@ public GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes build() { o.availability 
= availability; o.bootDiskSize = bootDiskSize; o.googleServiceAccount = googleServiceAccount; + o.localSsdCount = localSsdCount; o.usePreemptibleExecutors = usePreemptibleExecutors; o.zoneId = zoneId; return o; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.java index a3329dc2..5aa434a9 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsNewClusterGcpAttributes.java @@ -16,6 +16,7 @@ public final class GetJobJobSettingsSettingsNewClusterGcpAttributes { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; @@ -29,6 +30,9 @@ public Optional bootDiskSize() { public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } public Optional usePreemptibleExecutors() { return Optional.ofNullable(this.usePreemptibleExecutors); } @@ -48,6 +52,7 @@ public static final class Builder { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; public Builder() {} @@ -56,6 +61,7 @@ public Builder(GetJobJobSettingsSettingsNewClusterGcpAttributes defaults) { this.availability = defaults.availability; this.bootDiskSize = defaults.bootDiskSize; this.googleServiceAccount = defaults.googleServiceAccount; + this.localSsdCount = defaults.localSsdCount; this.usePreemptibleExecutors = defaults.usePreemptibleExecutors; this.zoneId = defaults.zoneId; } @@ -76,6 +82,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } @CustomType.Setter + public Builder localSsdCount(@Nullable Integer localSsdCount) { + this.localSsdCount = localSsdCount; + return this; + } + @CustomType.Setter public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { this.usePreemptibleExecutors = usePreemptibleExecutors; return this; @@ -90,6 +101,7 @@ public GetJobJobSettingsSettingsNewClusterGcpAttributes build() { o.availability = availability; o.bootDiskSize = bootDiskSize; o.googleServiceAccount = googleServiceAccount; + o.localSsdCount = localSsdCount; o.usePreemptibleExecutors = usePreemptibleExecutors; o.zoneId = zoneId; return o; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsParameter.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsParameter.java new file mode 100644 index 00000000..b0812527 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsParameter.java @@ -0,0 +1,68 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class GetJobJobSettingsSettingsParameter { + private @Nullable String default_; + /** + * @return the job name of databricks.Job if the resource was matched by id. + * + */ + private @Nullable String name; + + private GetJobJobSettingsSettingsParameter() {} + public Optional default_() { + return Optional.ofNullable(this.default_); + } + /** + * @return the job name of databricks.Job if the resource was matched by id. + * + */ + public Optional name() { + return Optional.ofNullable(this.name); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetJobJobSettingsSettingsParameter defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String default_; + private @Nullable String name; + public Builder() {} + public Builder(GetJobJobSettingsSettingsParameter defaults) { + Objects.requireNonNull(defaults); + this.default_ = defaults.default_; + this.name = defaults.name; + } + + @CustomType.Setter("default") + public Builder default_(@Nullable String default_) { + this.default_ = default_; + return this; + } + @CustomType.Setter + public Builder name(@Nullable String name) { + this.name = name; + return this; + } + public GetJobJobSettingsSettingsParameter build() { + final var o = new GetJobJobSettingsSettingsParameter(); + o.default_ = default_; + o.name = name; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsRunJobTask.java new file mode 100644 index 00000000..01672a81 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsRunJobTask.java @@ -0,0 +1,61 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Nullable; + +@CustomType +public final class GetJobJobSettingsSettingsRunJobTask { + private String jobId; + private @Nullable Map jobParameters; + + private GetJobJobSettingsSettingsRunJobTask() {} + public String jobId() { + return this.jobId; + } + public Map jobParameters() { + return this.jobParameters == null ? 
Map.of() : this.jobParameters; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetJobJobSettingsSettingsRunJobTask defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String jobId; + private @Nullable Map jobParameters; + public Builder() {} + public Builder(GetJobJobSettingsSettingsRunJobTask defaults) { + Objects.requireNonNull(defaults); + this.jobId = defaults.jobId; + this.jobParameters = defaults.jobParameters; + } + + @CustomType.Setter + public Builder jobId(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + return this; + } + @CustomType.Setter + public Builder jobParameters(@Nullable Map jobParameters) { + this.jobParameters = jobParameters; + return this; + } + public GetJobJobSettingsSettingsRunJobTask build() { + final var o = new GetJobJobSettingsSettingsRunJobTask(); + o.jobId = jobId; + o.jobParameters = jobParameters; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTask.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTask.java index 3418bfc9..482ef485 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTask.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTask.java @@ -15,6 +15,7 @@ import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskNotificationSettings; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskPipelineTask; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskPythonWheelTask; +import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskRunJobTask; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskSparkJarTask; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskSparkPythonTask; import com.pulumi.databricks.outputs.GetJobJobSettingsSettingsTaskSparkSubmitTask; @@ -48,6 +49,7 @@ public final class GetJobJobSettingsSettingsTask { private @Nullable GetJobJobSettingsSettingsTaskPythonWheelTask pythonWheelTask; private Boolean retryOnTimeout; private @Nullable String runIf; + private @Nullable GetJobJobSettingsSettingsTaskRunJobTask runJobTask; private @Nullable GetJobJobSettingsSettingsTaskSparkJarTask sparkJarTask; private @Nullable GetJobJobSettingsSettingsTaskSparkPythonTask sparkPythonTask; private @Nullable GetJobJobSettingsSettingsTaskSparkSubmitTask sparkSubmitTask; @@ -113,6 +115,9 @@ public Boolean retryOnTimeout() { public Optional runIf() { return Optional.ofNullable(this.runIf); } + public Optional runJobTask() { + return Optional.ofNullable(this.runJobTask); + } public Optional sparkJarTask() { return Optional.ofNullable(this.sparkJarTask); } @@ -160,6 +165,7 @@ public static final class Builder { private @Nullable GetJobJobSettingsSettingsTaskPythonWheelTask pythonWheelTask; private Boolean retryOnTimeout; private @Nullable String runIf; + private @Nullable GetJobJobSettingsSettingsTaskRunJobTask runJobTask; private @Nullable GetJobJobSettingsSettingsTaskSparkJarTask sparkJarTask; private @Nullable GetJobJobSettingsSettingsTaskSparkPythonTask sparkPythonTask; private @Nullable GetJobJobSettingsSettingsTaskSparkSubmitTask sparkSubmitTask; @@ -188,6 +194,7 @@ public Builder(GetJobJobSettingsSettingsTask defaults) { this.pythonWheelTask = defaults.pythonWheelTask; this.retryOnTimeout = defaults.retryOnTimeout; this.runIf = defaults.runIf; + this.runJobTask = 
defaults.runJobTask; this.sparkJarTask = defaults.sparkJarTask; this.sparkPythonTask = defaults.sparkPythonTask; this.sparkSubmitTask = defaults.sparkSubmitTask; @@ -298,6 +305,11 @@ public Builder runIf(@Nullable String runIf) { return this; } @CustomType.Setter + public Builder runJobTask(@Nullable GetJobJobSettingsSettingsTaskRunJobTask runJobTask) { + this.runJobTask = runJobTask; + return this; + } + @CustomType.Setter public Builder sparkJarTask(@Nullable GetJobJobSettingsSettingsTaskSparkJarTask sparkJarTask) { this.sparkJarTask = sparkJarTask; return this; @@ -348,6 +360,7 @@ public GetJobJobSettingsSettingsTask build() { o.pythonWheelTask = pythonWheelTask; o.retryOnTimeout = retryOnTimeout; o.runIf = runIf; + o.runJobTask = runJobTask; o.sparkJarTask = sparkJarTask; o.sparkPythonTask = sparkPythonTask; o.sparkSubmitTask = sparkSubmitTask; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.java index 4872016a..a0904998 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskNewClusterGcpAttributes.java @@ -16,6 +16,7 @@ public final class GetJobJobSettingsSettingsTaskNewClusterGcpAttributes { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; @@ -29,6 +30,9 @@ public Optional bootDiskSize() { public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } public Optional usePreemptibleExecutors() { return Optional.ofNullable(this.usePreemptibleExecutors); } @@ -48,6 +52,7 @@ public static final class Builder { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; public Builder() {} @@ -56,6 +61,7 @@ public Builder(GetJobJobSettingsSettingsTaskNewClusterGcpAttributes defaults) { this.availability = defaults.availability; this.bootDiskSize = defaults.bootDiskSize; this.googleServiceAccount = defaults.googleServiceAccount; + this.localSsdCount = defaults.localSsdCount; this.usePreemptibleExecutors = defaults.usePreemptibleExecutors; this.zoneId = defaults.zoneId; } @@ -76,6 +82,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } @CustomType.Setter + public Builder localSsdCount(@Nullable Integer localSsdCount) { + this.localSsdCount = localSsdCount; + return this; + } + @CustomType.Setter public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { this.usePreemptibleExecutors = usePreemptibleExecutors; return this; @@ -90,6 +101,7 @@ public GetJobJobSettingsSettingsTaskNewClusterGcpAttributes build() { o.availability = availability; o.bootDiskSize = bootDiskSize; o.googleServiceAccount = googleServiceAccount; + o.localSsdCount = localSsdCount; o.usePreemptibleExecutors = usePreemptibleExecutors; o.zoneId = zoneId; return o; diff --git 
a/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskRunJobTask.java new file mode 100644 index 00000000..24857b52 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/GetJobJobSettingsSettingsTaskRunJobTask.java @@ -0,0 +1,61 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Nullable; + +@CustomType +public final class GetJobJobSettingsSettingsTaskRunJobTask { + private String jobId; + private @Nullable Map jobParameters; + + private GetJobJobSettingsSettingsTaskRunJobTask() {} + public String jobId() { + return this.jobId; + } + public Map jobParameters() { + return this.jobParameters == null ? Map.of() : this.jobParameters; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetJobJobSettingsSettingsTaskRunJobTask defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String jobId; + private @Nullable Map jobParameters; + public Builder() {} + public Builder(GetJobJobSettingsSettingsTaskRunJobTask defaults) { + Objects.requireNonNull(defaults); + this.jobId = defaults.jobId; + this.jobParameters = defaults.jobParameters; + } + + @CustomType.Setter + public Builder jobId(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + return this; + } + @CustomType.Setter + public Builder jobParameters(@Nullable Map jobParameters) { + this.jobParameters = jobParameters; + return this; + } + public GetJobJobSettingsSettingsTaskRunJobTask build() { + final var o = new GetJobJobSettingsSettingsTaskRunJobTask(); + o.jobId = jobId; + o.jobParameters = jobParameters; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/InstancePoolAzureAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/InstancePoolAzureAttributes.java index 62225c64..dd5ac1c5 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/InstancePoolAzureAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/InstancePoolAzureAttributes.java @@ -13,7 +13,7 @@ @CustomType public final class InstancePoolAzureAttributes { /** - * @return Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * @return Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. * */ private @Nullable String availability; @@ -25,7 +25,7 @@ public final class InstancePoolAzureAttributes { private InstancePoolAzureAttributes() {} /** - * @return Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * @return Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. 
* */ public Optional availability() { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/InstancePoolGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/InstancePoolGcpAttributes.java index 5b6f366e..db3ef571 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/InstancePoolGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/InstancePoolGcpAttributes.java @@ -4,6 +4,7 @@ package com.pulumi.databricks.outputs; import com.pulumi.core.annotations.CustomType; +import java.lang.Integer; import java.lang.String; import java.util.Objects; import java.util.Optional; @@ -11,12 +12,32 @@ @CustomType public final class InstancePoolGcpAttributes { + /** + * @return Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * + */ private @Nullable String gcpAvailability; + /** + * @return Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + * + */ + private @Nullable Integer localSsdCount; private InstancePoolGcpAttributes() {} + /** + * @return Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * + */ public Optional gcpAvailability() { return Optional.ofNullable(this.gcpAvailability); } + /** + * @return Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + * + */ + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } public static Builder builder() { return new Builder(); @@ -28,10 +49,12 @@ public static Builder builder(InstancePoolGcpAttributes defaults) { @CustomType.Builder public static final class Builder { private @Nullable String gcpAvailability; + private @Nullable Integer localSsdCount; public Builder() {} public Builder(InstancePoolGcpAttributes defaults) { Objects.requireNonNull(defaults); this.gcpAvailability = defaults.gcpAvailability; + this.localSsdCount = defaults.localSsdCount; } @CustomType.Setter @@ -39,9 +62,15 @@ public Builder gcpAvailability(@Nullable String gcpAvailability) { this.gcpAvailability = gcpAvailability; return this; } + @CustomType.Setter + public Builder localSsdCount(@Nullable Integer localSsdCount) { + this.localSsdCount = localSsdCount; + return this; + } public InstancePoolGcpAttributes build() { final var o = new InstancePoolGcpAttributes(); o.gcpAvailability = gcpAvailability; + o.localSsdCount = localSsdCount; return o; } } diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobJobClusterNewClusterGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobJobClusterNewClusterGcpAttributes.java index 13e9db3a..ff94cc1e 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobJobClusterNewClusterGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobJobClusterNewClusterGcpAttributes.java @@ -16,6 +16,7 @@ public final class JobJobClusterNewClusterGcpAttributes { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; @@ -29,6 +30,9 @@ public Optional bootDiskSize() { public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + public Optional 
localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } public Optional usePreemptibleExecutors() { return Optional.ofNullable(this.usePreemptibleExecutors); } @@ -48,6 +52,7 @@ public static final class Builder { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; public Builder() {} @@ -56,6 +61,7 @@ public Builder(JobJobClusterNewClusterGcpAttributes defaults) { this.availability = defaults.availability; this.bootDiskSize = defaults.bootDiskSize; this.googleServiceAccount = defaults.googleServiceAccount; + this.localSsdCount = defaults.localSsdCount; this.usePreemptibleExecutors = defaults.usePreemptibleExecutors; this.zoneId = defaults.zoneId; } @@ -76,6 +82,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } @CustomType.Setter + public Builder localSsdCount(@Nullable Integer localSsdCount) { + this.localSsdCount = localSsdCount; + return this; + } + @CustomType.Setter public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { this.usePreemptibleExecutors = usePreemptibleExecutors; return this; @@ -90,6 +101,7 @@ public JobJobClusterNewClusterGcpAttributes build() { o.availability = availability; o.bootDiskSize = bootDiskSize; o.googleServiceAccount = googleServiceAccount; + o.localSsdCount = localSsdCount; o.usePreemptibleExecutors = usePreemptibleExecutors; o.zoneId = zoneId; return o; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobNewClusterGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobNewClusterGcpAttributes.java index a406f99d..c52c38ec 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobNewClusterGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobNewClusterGcpAttributes.java @@ -16,6 +16,7 @@ public final class JobNewClusterGcpAttributes { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; @@ -29,6 +30,9 @@ public Optional bootDiskSize() { public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } public Optional usePreemptibleExecutors() { return Optional.ofNullable(this.usePreemptibleExecutors); } @@ -48,6 +52,7 @@ public static final class Builder { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; public Builder() {} @@ -56,6 +61,7 @@ public Builder(JobNewClusterGcpAttributes defaults) { this.availability = defaults.availability; this.bootDiskSize = defaults.bootDiskSize; this.googleServiceAccount = defaults.googleServiceAccount; + this.localSsdCount = defaults.localSsdCount; this.usePreemptibleExecutors = defaults.usePreemptibleExecutors; this.zoneId = defaults.zoneId; } @@ -76,6 +82,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } @CustomType.Setter + public Builder localSsdCount(@Nullable Integer localSsdCount) { + 
this.localSsdCount = localSsdCount; + return this; + } + @CustomType.Setter public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { this.usePreemptibleExecutors = usePreemptibleExecutors; return this; @@ -90,6 +101,7 @@ public JobNewClusterGcpAttributes build() { o.availability = availability; o.bootDiskSize = bootDiskSize; o.googleServiceAccount = googleServiceAccount; + o.localSsdCount = localSsdCount; o.usePreemptibleExecutors = usePreemptibleExecutors; o.zoneId = zoneId; return o; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobNotificationSettings.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobNotificationSettings.java index e3b8fe8e..fdcd8914 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobNotificationSettings.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobNotificationSettings.java @@ -17,7 +17,7 @@ public final class JobNotificationSettings { */ private @Nullable Boolean noAlertForCanceledRuns; /** - * @return (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * @return (Bool) don't send alert for skipped runs. * */ private @Nullable Boolean noAlertForSkippedRuns; @@ -31,7 +31,7 @@ public Optional noAlertForCanceledRuns() { return Optional.ofNullable(this.noAlertForCanceledRuns); } /** - * @return (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * @return (Bool) don't send alert for skipped runs. * */ public Optional noAlertForSkippedRuns() { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobParameter.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobParameter.java new file mode 100644 index 00000000..f2a0f50f --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobParameter.java @@ -0,0 +1,76 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class JobParameter { + /** + * @return Default value of the parameter. + * + */ + private @Nullable String default_; + /** + * @return An optional name for the job. The default value is Untitled. + * + */ + private @Nullable String name; + + private JobParameter() {} + /** + * @return Default value of the parameter. + * + */ + public Optional default_() { + return Optional.ofNullable(this.default_); + } + /** + * @return An optional name for the job. The default value is Untitled. 
+ * + */ + public Optional name() { + return Optional.ofNullable(this.name); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(JobParameter defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String default_; + private @Nullable String name; + public Builder() {} + public Builder(JobParameter defaults) { + Objects.requireNonNull(defaults); + this.default_ = defaults.default_; + this.name = defaults.name; + } + + @CustomType.Setter("default") + public Builder default_(@Nullable String default_) { + this.default_ = default_; + return this; + } + @CustomType.Setter + public Builder name(@Nullable String name) { + this.name = name; + return this; + } + public JobParameter build() { + final var o = new JobParameter(); + o.default_ = default_; + o.name = name; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobRunJobTask.java new file mode 100644 index 00000000..4e700739 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobRunJobTask.java @@ -0,0 +1,77 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Nullable; + +@CustomType +public final class JobRunJobTask { + /** + * @return (String) ID of the job + * + */ + private String jobId; + /** + * @return (Map) Job parameters for the task + * + */ + private @Nullable Map jobParameters; + + private JobRunJobTask() {} + /** + * @return (String) ID of the job + * + */ + public String jobId() { + return this.jobId; + } + /** + * @return (Map) Job parameters for the task + * + */ + public Map jobParameters() { + return this.jobParameters == null ? 
Map.of() : this.jobParameters; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(JobRunJobTask defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String jobId; + private @Nullable Map jobParameters; + public Builder() {} + public Builder(JobRunJobTask defaults) { + Objects.requireNonNull(defaults); + this.jobId = defaults.jobId; + this.jobParameters = defaults.jobParameters; + } + + @CustomType.Setter + public Builder jobId(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + return this; + } + @CustomType.Setter + public Builder jobParameters(@Nullable Map jobParameters) { + this.jobParameters = jobParameters; + return this; + } + public JobRunJobTask build() { + final var o = new JobRunJobTask(); + o.jobId = jobId; + o.jobParameters = jobParameters; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTask.java index eb655cdc..f5aee5b9 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTask.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTask.java @@ -15,6 +15,7 @@ import com.pulumi.databricks.outputs.JobTaskNotificationSettings; import com.pulumi.databricks.outputs.JobTaskPipelineTask; import com.pulumi.databricks.outputs.JobTaskPythonWheelTask; +import com.pulumi.databricks.outputs.JobTaskRunJobTask; import com.pulumi.databricks.outputs.JobTaskSparkJarTask; import com.pulumi.databricks.outputs.JobTaskSparkPythonTask; import com.pulumi.databricks.outputs.JobTaskSparkSubmitTask; @@ -87,7 +88,12 @@ public final class JobTask { * */ private @Nullable Boolean retryOnTimeout; + /** + * @return An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + * + */ private @Nullable String runIf; + private @Nullable JobTaskRunJobTask runJobTask; private @Nullable JobTaskSparkJarTask sparkJarTask; private @Nullable JobTaskSparkPythonTask sparkPythonTask; private @Nullable JobTaskSparkSubmitTask sparkSubmitTask; @@ -199,9 +205,16 @@ public Optional pythonWheelTask() { public Optional retryOnTimeout() { return Optional.ofNullable(this.retryOnTimeout); } + /** + * @return An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. 
+ * + */ public Optional runIf() { return Optional.ofNullable(this.runIf); } + public Optional runJobTask() { + return Optional.ofNullable(this.runJobTask); + } public Optional sparkJarTask() { return Optional.ofNullable(this.sparkJarTask); } @@ -258,6 +271,7 @@ public static final class Builder { private @Nullable JobTaskPythonWheelTask pythonWheelTask; private @Nullable Boolean retryOnTimeout; private @Nullable String runIf; + private @Nullable JobTaskRunJobTask runJobTask; private @Nullable JobTaskSparkJarTask sparkJarTask; private @Nullable JobTaskSparkPythonTask sparkPythonTask; private @Nullable JobTaskSparkSubmitTask sparkSubmitTask; @@ -286,6 +300,7 @@ public Builder(JobTask defaults) { this.pythonWheelTask = defaults.pythonWheelTask; this.retryOnTimeout = defaults.retryOnTimeout; this.runIf = defaults.runIf; + this.runJobTask = defaults.runJobTask; this.sparkJarTask = defaults.sparkJarTask; this.sparkPythonTask = defaults.sparkPythonTask; this.sparkSubmitTask = defaults.sparkSubmitTask; @@ -396,6 +411,11 @@ public Builder runIf(@Nullable String runIf) { return this; } @CustomType.Setter + public Builder runJobTask(@Nullable JobTaskRunJobTask runJobTask) { + this.runJobTask = runJobTask; + return this; + } + @CustomType.Setter public Builder sparkJarTask(@Nullable JobTaskSparkJarTask sparkJarTask) { this.sparkJarTask = sparkJarTask; return this; @@ -446,6 +466,7 @@ public JobTask build() { o.pythonWheelTask = pythonWheelTask; o.retryOnTimeout = retryOnTimeout; o.runIf = runIf; + o.runJobTask = runJobTask; o.sparkJarTask = sparkJarTask; o.sparkPythonTask = sparkPythonTask; o.sparkSubmitTask = sparkSubmitTask; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskNewClusterGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskNewClusterGcpAttributes.java index 30f8ca36..879f9514 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskNewClusterGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskNewClusterGcpAttributes.java @@ -16,6 +16,7 @@ public final class JobTaskNewClusterGcpAttributes { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; @@ -29,6 +30,9 @@ public Optional bootDiskSize() { public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } public Optional usePreemptibleExecutors() { return Optional.ofNullable(this.usePreemptibleExecutors); } @@ -48,6 +52,7 @@ public static final class Builder { private @Nullable String availability; private @Nullable Integer bootDiskSize; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable Boolean usePreemptibleExecutors; private @Nullable String zoneId; public Builder() {} @@ -56,6 +61,7 @@ public Builder(JobTaskNewClusterGcpAttributes defaults) { this.availability = defaults.availability; this.bootDiskSize = defaults.bootDiskSize; this.googleServiceAccount = defaults.googleServiceAccount; + this.localSsdCount = defaults.localSsdCount; this.usePreemptibleExecutors = defaults.usePreemptibleExecutors; this.zoneId = defaults.zoneId; } @@ -76,6 +82,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } @CustomType.Setter + 
public Builder localSsdCount(@Nullable Integer localSsdCount) { + this.localSsdCount = localSsdCount; + return this; + } + @CustomType.Setter public Builder usePreemptibleExecutors(@Nullable Boolean usePreemptibleExecutors) { this.usePreemptibleExecutors = usePreemptibleExecutors; return this; @@ -90,6 +101,7 @@ public JobTaskNewClusterGcpAttributes build() { o.availability = availability; o.bootDiskSize = bootDiskSize; o.googleServiceAccount = googleServiceAccount; + o.localSsdCount = localSsdCount; o.usePreemptibleExecutors = usePreemptibleExecutors; o.zoneId = zoneId; return o; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskNotificationSettings.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskNotificationSettings.java index cbadb164..486f91af 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskNotificationSettings.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskNotificationSettings.java @@ -22,7 +22,7 @@ public final class JobTaskNotificationSettings { */ private @Nullable Boolean noAlertForCanceledRuns; /** - * @return (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * @return (Bool) don't send alert for skipped runs. * */ private @Nullable Boolean noAlertForSkippedRuns; @@ -43,7 +43,7 @@ public Optional noAlertForCanceledRuns() { return Optional.ofNullable(this.noAlertForCanceledRuns); } /** - * @return (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + * @return (Bool) don't send alert for skipped runs. * */ public Optional noAlertForSkippedRuns() { diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskRunJobTask.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskRunJobTask.java new file mode 100644 index 00000000..91f9e2b3 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/JobTaskRunJobTask.java @@ -0,0 +1,77 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.databricks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.Object; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Nullable; + +@CustomType +public final class JobTaskRunJobTask { + /** + * @return (String) ID of the job + * + */ + private String jobId; + /** + * @return (Map) Job parameters for the task + * + */ + private @Nullable Map jobParameters; + + private JobTaskRunJobTask() {} + /** + * @return (String) ID of the job + * + */ + public String jobId() { + return this.jobId; + } + /** + * @return (Map) Job parameters for the task + * + */ + public Map jobParameters() { + return this.jobParameters == null ? 
Map.of() : this.jobParameters; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(JobTaskRunJobTask defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String jobId; + private @Nullable Map jobParameters; + public Builder() {} + public Builder(JobTaskRunJobTask defaults) { + Objects.requireNonNull(defaults); + this.jobId = defaults.jobId; + this.jobParameters = defaults.jobParameters; + } + + @CustomType.Setter + public Builder jobId(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + return this; + } + @CustomType.Setter + public Builder jobParameters(@Nullable Map jobParameters) { + this.jobParameters = jobParameters; + return this; + } + public JobTaskRunJobTask build() { + final var o = new JobTaskRunJobTask(); + o.jobId = jobId; + o.jobParameters = jobParameters; + return o; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/ModelServingConfigServedModel.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/ModelServingConfigServedModel.java index 267579c8..b285a5c5 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/ModelServingConfigServedModel.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/ModelServingConfigServedModel.java @@ -15,6 +15,7 @@ @CustomType public final class ModelServingConfigServedModel { private @Nullable Map environmentVars; + private @Nullable String instanceProfileArn; /** * @return The name of the model in Databricks Model Registry to be served. * @@ -45,6 +46,9 @@ private ModelServingConfigServedModel() {} public Map environmentVars() { return this.environmentVars == null ? Map.of() : this.environmentVars; } + public Optional instanceProfileArn() { + return Optional.ofNullable(this.instanceProfileArn); + } /** * @return The name of the model in Databricks Model Registry to be served. 
* @@ -91,6 +95,7 @@ public static Builder builder(ModelServingConfigServedModel defaults) { @CustomType.Builder public static final class Builder { private @Nullable Map environmentVars; + private @Nullable String instanceProfileArn; private String modelName; private String modelVersion; private @Nullable String name; @@ -100,6 +105,7 @@ public Builder() {} public Builder(ModelServingConfigServedModel defaults) { Objects.requireNonNull(defaults); this.environmentVars = defaults.environmentVars; + this.instanceProfileArn = defaults.instanceProfileArn; this.modelName = defaults.modelName; this.modelVersion = defaults.modelVersion; this.name = defaults.name; @@ -113,6 +119,11 @@ public Builder environmentVars(@Nullable Map environmentVars) { return this; } @CustomType.Setter + public Builder instanceProfileArn(@Nullable String instanceProfileArn) { + this.instanceProfileArn = instanceProfileArn; + return this; + } + @CustomType.Setter public Builder modelName(String modelName) { this.modelName = Objects.requireNonNull(modelName); return this; @@ -140,6 +151,7 @@ public Builder workloadSize(String workloadSize) { public ModelServingConfigServedModel build() { final var o = new ModelServingConfigServedModel(); o.environmentVars = environmentVars; + o.instanceProfileArn = instanceProfileArn; o.modelName = modelName; o.modelVersion = modelVersion; o.name = name; diff --git a/sdk/java/src/main/java/com/pulumi/databricks/outputs/PipelineClusterGcpAttributes.java b/sdk/java/src/main/java/com/pulumi/databricks/outputs/PipelineClusterGcpAttributes.java index a80f152a..3061159a 100644 --- a/sdk/java/src/main/java/com/pulumi/databricks/outputs/PipelineClusterGcpAttributes.java +++ b/sdk/java/src/main/java/com/pulumi/databricks/outputs/PipelineClusterGcpAttributes.java @@ -4,6 +4,7 @@ package com.pulumi.databricks.outputs; import com.pulumi.core.annotations.CustomType; +import java.lang.Integer; import java.lang.String; import java.util.Objects; import java.util.Optional; @@ -13,6 +14,7 @@ public final class PipelineClusterGcpAttributes { private @Nullable String availability; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable String zoneId; private PipelineClusterGcpAttributes() {} @@ -22,6 +24,9 @@ public Optional availability() { public Optional googleServiceAccount() { return Optional.ofNullable(this.googleServiceAccount); } + public Optional localSsdCount() { + return Optional.ofNullable(this.localSsdCount); + } public Optional zoneId() { return Optional.ofNullable(this.zoneId); } @@ -37,12 +42,14 @@ public static Builder builder(PipelineClusterGcpAttributes defaults) { public static final class Builder { private @Nullable String availability; private @Nullable String googleServiceAccount; + private @Nullable Integer localSsdCount; private @Nullable String zoneId; public Builder() {} public Builder(PipelineClusterGcpAttributes defaults) { Objects.requireNonNull(defaults); this.availability = defaults.availability; this.googleServiceAccount = defaults.googleServiceAccount; + this.localSsdCount = defaults.localSsdCount; this.zoneId = defaults.zoneId; } @@ -57,6 +64,11 @@ public Builder googleServiceAccount(@Nullable String googleServiceAccount) { return this; } @CustomType.Setter + public Builder localSsdCount(@Nullable Integer localSsdCount) { + this.localSsdCount = localSsdCount; + return this; + } + @CustomType.Setter public Builder zoneId(@Nullable String zoneId) { this.zoneId = zoneId; return this; @@ -65,6 +77,7 @@ public 
PipelineClusterGcpAttributes build() { final var o = new PipelineClusterGcpAttributes(); o.availability = availability; o.googleServiceAccount = googleServiceAccount; + o.localSsdCount = localSsdCount; o.zoneId = zoneId; return o; } diff --git a/sdk/nodejs/job.ts b/sdk/nodejs/job.ts index 990ff314..abf9870a 100644 --- a/sdk/nodejs/job.ts +++ b/sdk/nodejs/job.ts @@ -105,6 +105,7 @@ export class Job extends pulumi.CustomResource { * An optional block controlling the notification settings on the job level (described below). */ public readonly notificationSettings!: pulumi.Output; + public readonly parameters!: pulumi.Output; public readonly pipelineTask!: pulumi.Output; public readonly pythonWheelTask!: pulumi.Output; public readonly queue!: pulumi.Output; @@ -113,6 +114,7 @@ export class Job extends pulumi.CustomResource { */ public readonly retryOnTimeout!: pulumi.Output; public readonly runAs!: pulumi.Output; + public readonly runJobTask!: pulumi.Output; /** * (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. */ @@ -168,11 +170,13 @@ export class Job extends pulumi.CustomResource { resourceInputs["newCluster"] = state ? state.newCluster : undefined; resourceInputs["notebookTask"] = state ? state.notebookTask : undefined; resourceInputs["notificationSettings"] = state ? state.notificationSettings : undefined; + resourceInputs["parameters"] = state ? state.parameters : undefined; resourceInputs["pipelineTask"] = state ? state.pipelineTask : undefined; resourceInputs["pythonWheelTask"] = state ? state.pythonWheelTask : undefined; resourceInputs["queue"] = state ? state.queue : undefined; resourceInputs["retryOnTimeout"] = state ? state.retryOnTimeout : undefined; resourceInputs["runAs"] = state ? state.runAs : undefined; + resourceInputs["runJobTask"] = state ? state.runJobTask : undefined; resourceInputs["schedule"] = state ? state.schedule : undefined; resourceInputs["sparkJarTask"] = state ? state.sparkJarTask : undefined; resourceInputs["sparkPythonTask"] = state ? state.sparkPythonTask : undefined; @@ -204,11 +208,13 @@ export class Job extends pulumi.CustomResource { resourceInputs["newCluster"] = args ? args.newCluster : undefined; resourceInputs["notebookTask"] = args ? args.notebookTask : undefined; resourceInputs["notificationSettings"] = args ? args.notificationSettings : undefined; + resourceInputs["parameters"] = args ? args.parameters : undefined; resourceInputs["pipelineTask"] = args ? args.pipelineTask : undefined; resourceInputs["pythonWheelTask"] = args ? args.pythonWheelTask : undefined; resourceInputs["queue"] = args ? args.queue : undefined; resourceInputs["retryOnTimeout"] = args ? args.retryOnTimeout : undefined; resourceInputs["runAs"] = args ? args.runAs : undefined; + resourceInputs["runJobTask"] = args ? args.runJobTask : undefined; resourceInputs["schedule"] = args ? args.schedule : undefined; resourceInputs["sparkJarTask"] = args ? args.sparkJarTask : undefined; resourceInputs["sparkPythonTask"] = args ? args.sparkPythonTask : undefined; @@ -291,6 +297,7 @@ export interface JobState { * An optional block controlling the notification settings on the job level (described below). 
*/ notificationSettings?: pulumi.Input; + parameters?: pulumi.Input[]>; pipelineTask?: pulumi.Input; pythonWheelTask?: pulumi.Input; queue?: pulumi.Input; @@ -299,6 +306,7 @@ export interface JobState { */ retryOnTimeout?: pulumi.Input; runAs?: pulumi.Input; + runJobTask?: pulumi.Input; /** * (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. */ @@ -389,6 +397,7 @@ export interface JobArgs { * An optional block controlling the notification settings on the job level (described below). */ notificationSettings?: pulumi.Input; + parameters?: pulumi.Input[]>; pipelineTask?: pulumi.Input; pythonWheelTask?: pulumi.Input; queue?: pulumi.Input; @@ -397,6 +406,7 @@ export interface JobArgs { */ retryOnTimeout?: pulumi.Input; runAs?: pulumi.Input; + runJobTask?: pulumi.Input; /** * (List) An optional periodic schedule for this job. The default behavior is that the job runs when triggered by clicking Run Now in the Jobs UI or sending an API request to runNow. This field is a block and is documented below. */ diff --git a/sdk/nodejs/types/input.ts b/sdk/nodejs/types/input.ts index cf97c40e..3e78d9d6 100644 --- a/sdk/nodejs/types/input.ts +++ b/sdk/nodejs/types/input.ts @@ -87,6 +87,10 @@ export interface ClusterGcpAttributes { availability?: pulumi.Input; bootDiskSize?: pulumi.Input; googleServiceAccount?: pulumi.Input; + localSsdCount?: pulumi.Input; + /** + * @deprecated Please use 'availability' instead. + */ usePreemptibleExecutors?: pulumi.Input; zoneId?: pulumi.Input; } @@ -536,6 +540,7 @@ export interface GetClusterClusterInfoGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -544,6 +549,7 @@ export interface GetClusterClusterInfoGcpAttributesArgs { availability?: pulumi.Input; bootDiskSize?: pulumi.Input; googleServiceAccount?: pulumi.Input; + localSsdCount?: pulumi.Input; usePreemptibleExecutors?: pulumi.Input; zoneId?: pulumi.Input; } @@ -726,10 +732,12 @@ export interface GetInstancePoolPoolInfoDiskSpecDiskTypeArgs { export interface GetInstancePoolPoolInfoGcpAttributes { gcpAvailability?: string; + localSsdCount?: number; } export interface GetInstancePoolPoolInfoGcpAttributesArgs { gcpAvailability?: pulumi.Input; + localSsdCount?: pulumi.Input; } export interface GetInstancePoolPoolInfoInstancePoolFleetAttribute { @@ -845,11 +853,13 @@ export interface GetJobJobSettingsSettings { newCluster?: inputs.GetJobJobSettingsSettingsNewCluster; notebookTask?: inputs.GetJobJobSettingsSettingsNotebookTask; notificationSettings?: inputs.GetJobJobSettingsSettingsNotificationSettings; + parameters?: inputs.GetJobJobSettingsSettingsParameter[]; pipelineTask?: inputs.GetJobJobSettingsSettingsPipelineTask; pythonWheelTask?: inputs.GetJobJobSettingsSettingsPythonWheelTask; queue?: inputs.GetJobJobSettingsSettingsQueue; retryOnTimeout?: boolean; runAs?: inputs.GetJobJobSettingsSettingsRunAs; + runJobTask?: inputs.GetJobJobSettingsSettingsRunJobTask; schedule?: inputs.GetJobJobSettingsSettingsSchedule; sparkJarTask?: inputs.GetJobJobSettingsSettingsSparkJarTask; sparkPythonTask?: inputs.GetJobJobSettingsSettingsSparkPythonTask; @@ -882,11 +892,13 @@ export interface GetJobJobSettingsSettingsArgs { newCluster?: pulumi.Input; notebookTask?: pulumi.Input; notificationSettings?: pulumi.Input; + parameters?: 
pulumi.Input[]>; pipelineTask?: pulumi.Input; pythonWheelTask?: pulumi.Input; queue?: pulumi.Input; retryOnTimeout?: pulumi.Input; runAs?: pulumi.Input; + runJobTask?: pulumi.Input; schedule?: pulumi.Input; sparkJarTask?: pulumi.Input; sparkPythonTask?: pulumi.Input; @@ -1214,6 +1226,7 @@ export interface GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -1222,6 +1235,7 @@ export interface GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributesArgs availability?: pulumi.Input; bootDiskSize?: pulumi.Input; googleServiceAccount?: pulumi.Input; + localSsdCount?: pulumi.Input; usePreemptibleExecutors?: pulumi.Input; zoneId?: pulumi.Input; } @@ -1566,6 +1580,7 @@ export interface GetJobJobSettingsSettingsNewClusterGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -1574,6 +1589,7 @@ export interface GetJobJobSettingsSettingsNewClusterGcpAttributesArgs { availability?: pulumi.Input; bootDiskSize?: pulumi.Input; googleServiceAccount?: pulumi.Input; + localSsdCount?: pulumi.Input; usePreemptibleExecutors?: pulumi.Input; zoneId?: pulumi.Input; } @@ -1696,6 +1712,22 @@ export interface GetJobJobSettingsSettingsNotificationSettingsArgs { noAlertForSkippedRuns?: pulumi.Input; } +export interface GetJobJobSettingsSettingsParameter { + default?: string; + /** + * the job name of databricks.Job if the resource was matched by id. + */ + name?: string; +} + +export interface GetJobJobSettingsSettingsParameterArgs { + default?: pulumi.Input; + /** + * the job name of databricks.Job if the resource was matched by id. 
+ */ + name?: pulumi.Input; +} + export interface GetJobJobSettingsSettingsPipelineTask { fullRefresh?: boolean; pipelineId: string; @@ -1736,6 +1768,16 @@ export interface GetJobJobSettingsSettingsRunAsArgs { userName?: pulumi.Input; } +export interface GetJobJobSettingsSettingsRunJobTask { + jobId: string; + jobParameters?: {[key: string]: any}; +} + +export interface GetJobJobSettingsSettingsRunJobTaskArgs { + jobId: pulumi.Input; + jobParameters?: pulumi.Input<{[key: string]: any}>; +} + export interface GetJobJobSettingsSettingsSchedule { pauseStatus?: string; quartzCronExpression: string; @@ -1800,6 +1842,7 @@ export interface GetJobJobSettingsSettingsTask { pythonWheelTask?: inputs.GetJobJobSettingsSettingsTaskPythonWheelTask; retryOnTimeout?: boolean; runIf?: string; + runJobTask?: inputs.GetJobJobSettingsSettingsTaskRunJobTask; sparkJarTask?: inputs.GetJobJobSettingsSettingsTaskSparkJarTask; sparkPythonTask?: inputs.GetJobJobSettingsSettingsTaskSparkPythonTask; sparkSubmitTask?: inputs.GetJobJobSettingsSettingsTaskSparkSubmitTask; @@ -1828,6 +1871,7 @@ export interface GetJobJobSettingsSettingsTaskArgs { pythonWheelTask?: pulumi.Input; retryOnTimeout?: pulumi.Input; runIf?: pulumi.Input; + runJobTask?: pulumi.Input; sparkJarTask?: pulumi.Input; sparkPythonTask?: pulumi.Input; sparkSubmitTask?: pulumi.Input; @@ -2158,6 +2202,7 @@ export interface GetJobJobSettingsSettingsTaskNewClusterGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -2166,6 +2211,7 @@ export interface GetJobJobSettingsSettingsTaskNewClusterGcpAttributesArgs { availability?: pulumi.Input; bootDiskSize?: pulumi.Input; googleServiceAccount?: pulumi.Input; + localSsdCount?: pulumi.Input; usePreemptibleExecutors?: pulumi.Input; zoneId?: pulumi.Input; } @@ -2314,6 +2360,16 @@ export interface GetJobJobSettingsSettingsTaskPythonWheelTaskArgs { parameters?: pulumi.Input[]>; } +export interface GetJobJobSettingsSettingsTaskRunJobTask { + jobId: string; + jobParameters?: {[key: string]: any}; +} + +export interface GetJobJobSettingsSettingsTaskRunJobTaskArgs { + jobId: pulumi.Input; + jobParameters?: pulumi.Input<{[key: string]: any}>; +} + export interface GetJobJobSettingsSettingsTaskSparkJarTask { jarUri?: string; mainClassName?: string; @@ -2746,7 +2802,7 @@ export interface InstancePoolAwsAttributes { export interface InstancePoolAzureAttributes { /** - * Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. */ availability?: pulumi.Input; /** @@ -2773,7 +2829,14 @@ export interface InstancePoolDiskSpecDiskType { } export interface InstancePoolGcpAttributes { + /** + * Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + */ gcpAvailability?: pulumi.Input; + /** + * Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. 
+ */ + localSsdCount?: pulumi.Input; } export interface InstancePoolInstancePoolFleetAttributes { @@ -3044,6 +3107,7 @@ export interface JobJobClusterNewClusterGcpAttributes { availability?: pulumi.Input; bootDiskSize?: pulumi.Input; googleServiceAccount?: pulumi.Input; + localSsdCount?: pulumi.Input; usePreemptibleExecutors?: pulumi.Input; zoneId?: pulumi.Input; } @@ -3269,6 +3333,7 @@ export interface JobNewClusterGcpAttributes { availability?: pulumi.Input; bootDiskSize?: pulumi.Input; googleServiceAccount?: pulumi.Input; + localSsdCount?: pulumi.Input; usePreemptibleExecutors?: pulumi.Input; zoneId?: pulumi.Input; } @@ -3388,11 +3453,22 @@ export interface JobNotificationSettings { */ noAlertForCanceledRuns?: pulumi.Input; /** - * (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). + * (Bool) don't send alert for skipped runs. */ noAlertForSkippedRuns?: pulumi.Input; } +export interface JobParameter { + /** + * Default value of the parameter. + */ + default?: pulumi.Input; + /** + * An optional name for the job. The default value is Untitled. + */ + name?: pulumi.Input; +} + export interface JobPipelineTask { /** * (Bool) Specifies if there should be full refresh of the pipeline. @@ -3450,6 +3526,17 @@ export interface JobRunAs { userName?: pulumi.Input; } +export interface JobRunJobTask { + /** + * (String) ID of the job + */ + jobId: pulumi.Input; + /** + * (Map) Job parameters for the task + */ + jobParameters?: pulumi.Input<{[key: string]: any}>; +} + export interface JobSchedule { /** * Indicate whether this schedule is paused or not. Either `PAUSED` or `UNPAUSED`. When the `pauseStatus` field is omitted and a schedule is provided, the server will default to using `UNPAUSED` as a value for `pauseStatus`. @@ -3548,7 +3635,11 @@ export interface JobTask { * (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. */ retryOnTimeout?: pulumi.Input; + /** + * An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + */ runIf?: pulumi.Input; + runJobTask?: pulumi.Input; sparkJarTask?: pulumi.Input; sparkPythonTask?: pulumi.Input; sparkSubmitTask?: pulumi.Input; @@ -3786,6 +3877,7 @@ export interface JobTaskNewClusterGcpAttributes { availability?: pulumi.Input; bootDiskSize?: pulumi.Input; googleServiceAccount?: pulumi.Input; + localSsdCount?: pulumi.Input; usePreemptibleExecutors?: pulumi.Input; zoneId?: pulumi.Input; } @@ -3909,7 +4001,7 @@ export interface JobTaskNotificationSettings { */ noAlertForCanceledRuns?: pulumi.Input; /** - * (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). + * (Bool) don't send alert for skipped runs. 
*/ noAlertForSkippedRuns?: pulumi.Input; } @@ -3946,6 +4038,17 @@ export interface JobTaskPythonWheelTask { parameters?: pulumi.Input[]>; } +export interface JobTaskRunJobTask { + /** + * (String) ID of the job + */ + jobId: pulumi.Input; + /** + * (Map) Job parameters for the task + */ + jobParameters?: pulumi.Input<{[key: string]: any}>; +} + export interface JobTaskSparkJarTask { jarUri?: pulumi.Input; /** @@ -4316,6 +4419,7 @@ export interface ModelServingConfig { export interface ModelServingConfigServedModel { environmentVars?: pulumi.Input<{[key: string]: any}>; + instanceProfileArn?: pulumi.Input; /** * The name of the model in Databricks Model Registry to be served. */ @@ -4607,6 +4711,7 @@ export interface PipelineClusterClusterLogConfS3 { export interface PipelineClusterGcpAttributes { availability?: pulumi.Input; googleServiceAccount?: pulumi.Input; + localSsdCount?: pulumi.Input; zoneId?: pulumi.Input; } diff --git a/sdk/nodejs/types/output.ts b/sdk/nodejs/types/output.ts index da531ada..9dd6cb7b 100644 --- a/sdk/nodejs/types/output.ts +++ b/sdk/nodejs/types/output.ts @@ -87,6 +87,10 @@ export interface ClusterGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; + /** + * @deprecated Please use 'availability' instead. + */ usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -352,6 +356,7 @@ export interface GetClusterClusterInfoGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -454,6 +459,7 @@ export interface GetInstancePoolPoolInfoDiskSpecDiskType { export interface GetInstancePoolPoolInfoGcpAttributes { gcpAvailability?: string; + localSsdCount?: number; } export interface GetInstancePoolPoolInfoInstancePoolFleetAttribute { @@ -523,11 +529,13 @@ export interface GetJobJobSettingsSettings { newCluster?: outputs.GetJobJobSettingsSettingsNewCluster; notebookTask?: outputs.GetJobJobSettingsSettingsNotebookTask; notificationSettings?: outputs.GetJobJobSettingsSettingsNotificationSettings; + parameters?: outputs.GetJobJobSettingsSettingsParameter[]; pipelineTask?: outputs.GetJobJobSettingsSettingsPipelineTask; pythonWheelTask?: outputs.GetJobJobSettingsSettingsPythonWheelTask; queue?: outputs.GetJobJobSettingsSettingsQueue; retryOnTimeout?: boolean; runAs?: outputs.GetJobJobSettingsSettingsRunAs; + runJobTask?: outputs.GetJobJobSettingsSettingsRunJobTask; schedule?: outputs.GetJobJobSettingsSettingsSchedule; sparkJarTask?: outputs.GetJobJobSettingsSettingsSparkJarTask; sparkPythonTask?: outputs.GetJobJobSettingsSettingsSparkPythonTask; @@ -699,6 +707,7 @@ export interface GetJobJobSettingsSettingsJobClusterNewClusterGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -875,6 +884,7 @@ export interface GetJobJobSettingsSettingsNewClusterGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -938,6 +948,14 @@ export interface GetJobJobSettingsSettingsNotificationSettings { noAlertForSkippedRuns?: boolean; } +export interface GetJobJobSettingsSettingsParameter { + default?: string; + /** + * the job name of databricks.Job if the resource was matched by id. 
+ */ + name?: string; +} + export interface GetJobJobSettingsSettingsPipelineTask { fullRefresh?: boolean; pipelineId: string; @@ -958,6 +976,11 @@ export interface GetJobJobSettingsSettingsRunAs { userName?: string; } +export interface GetJobJobSettingsSettingsRunJobTask { + jobId: string; + jobParameters?: {[key: string]: any}; +} + export interface GetJobJobSettingsSettingsSchedule { pauseStatus: string; quartzCronExpression: string; @@ -1000,6 +1023,7 @@ export interface GetJobJobSettingsSettingsTask { pythonWheelTask?: outputs.GetJobJobSettingsSettingsTaskPythonWheelTask; retryOnTimeout: boolean; runIf?: string; + runJobTask?: outputs.GetJobJobSettingsSettingsTaskRunJobTask; sparkJarTask?: outputs.GetJobJobSettingsSettingsTaskSparkJarTask; sparkPythonTask?: outputs.GetJobJobSettingsSettingsTaskSparkPythonTask; sparkSubmitTask?: outputs.GetJobJobSettingsSettingsTaskSparkSubmitTask; @@ -1171,6 +1195,7 @@ export interface GetJobJobSettingsSettingsTaskNewClusterGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -1247,6 +1272,11 @@ export interface GetJobJobSettingsSettingsTaskPythonWheelTask { parameters?: string[]; } +export interface GetJobJobSettingsSettingsTaskRunJobTask { + jobId: string; + jobParameters?: {[key: string]: any}; +} + export interface GetJobJobSettingsSettingsTaskSparkJarTask { jarUri?: string; mainClassName?: string; @@ -1482,7 +1512,7 @@ export interface InstancePoolAwsAttributes { export interface InstancePoolAzureAttributes { /** - * Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + * Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. */ availability?: string; /** @@ -1509,7 +1539,14 @@ export interface InstancePoolDiskSpecDiskType { } export interface InstancePoolGcpAttributes { + /** + * Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + */ gcpAvailability?: string; + /** + * Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + */ + localSsdCount?: number; } export interface InstancePoolInstancePoolFleetAttributes { @@ -1780,6 +1817,7 @@ export interface JobJobClusterNewClusterGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -2005,6 +2043,7 @@ export interface JobNewClusterGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -2124,11 +2163,22 @@ export interface JobNotificationSettings { */ noAlertForCanceledRuns?: boolean; /** - * (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). + * (Bool) don't send alert for skipped runs. */ noAlertForSkippedRuns?: boolean; } +export interface JobParameter { + /** + * Default value of the parameter. + */ + default?: string; + /** + * An optional name for the job. The default value is Untitled. + */ + name?: string; +} + export interface JobPipelineTask { /** * (Bool) Specifies if there should be full refresh of the pipeline. 
@@ -2186,6 +2236,17 @@ export interface JobRunAs { userName?: string; } +export interface JobRunJobTask { + /** + * (String) ID of the job + */ + jobId: string; + /** + * (Map) Job parameters for the task + */ + jobParameters?: {[key: string]: any}; +} + export interface JobSchedule { /** * Indicate whether this schedule is paused or not. Either `PAUSED` or `UNPAUSED`. When the `pauseStatus` field is omitted and a schedule is provided, the server will default to using `UNPAUSED` as a value for `pauseStatus`. @@ -2284,7 +2345,11 @@ export interface JobTask { * (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. */ retryOnTimeout: boolean; + /** + * An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + */ runIf?: string; + runJobTask?: outputs.JobTaskRunJobTask; sparkJarTask?: outputs.JobTaskSparkJarTask; sparkPythonTask?: outputs.JobTaskSparkPythonTask; sparkSubmitTask?: outputs.JobTaskSparkSubmitTask; @@ -2522,6 +2587,7 @@ export interface JobTaskNewClusterGcpAttributes { availability?: string; bootDiskSize?: number; googleServiceAccount?: string; + localSsdCount?: number; usePreemptibleExecutors?: boolean; zoneId?: string; } @@ -2645,7 +2711,7 @@ export interface JobTaskNotificationSettings { */ noAlertForCanceledRuns?: boolean; /** - * (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notificationSettings` configuration block). + * (Bool) don't send alert for skipped runs. */ noAlertForSkippedRuns?: boolean; } @@ -2682,6 +2748,17 @@ export interface JobTaskPythonWheelTask { parameters?: string[]; } +export interface JobTaskRunJobTask { + /** + * (String) ID of the job + */ + jobId: string; + /** + * (Map) Job parameters for the task + */ + jobParameters?: {[key: string]: any}; +} + export interface JobTaskSparkJarTask { jarUri?: string; /** @@ -3052,6 +3129,7 @@ export interface ModelServingConfig { export interface ModelServingConfigServedModel { environmentVars?: {[key: string]: any}; + instanceProfileArn?: string; /** * The name of the model in Databricks Model Registry to be served. 
*/ @@ -3343,6 +3421,7 @@ export interface PipelineClusterClusterLogConfS3 { export interface PipelineClusterGcpAttributes { availability?: string; googleServiceAccount?: string; + localSsdCount?: number; zoneId?: string; } diff --git a/sdk/python/pulumi_databricks/_inputs.py b/sdk/python/pulumi_databricks/_inputs.py index b70fcfca..fc14b4db 100644 --- a/sdk/python/pulumi_databricks/_inputs.py +++ b/sdk/python/pulumi_databricks/_inputs.py @@ -105,10 +105,12 @@ 'JobNewClusterWorkloadTypeClientsArgs', 'JobNotebookTaskArgs', 'JobNotificationSettingsArgs', + 'JobParameterArgs', 'JobPipelineTaskArgs', 'JobPythonWheelTaskArgs', 'JobQueueArgs', 'JobRunAsArgs', + 'JobRunJobTaskArgs', 'JobScheduleArgs', 'JobSparkJarTaskArgs', 'JobSparkPythonTaskArgs', @@ -149,6 +151,7 @@ 'JobTaskNotificationSettingsArgs', 'JobTaskPipelineTaskArgs', 'JobTaskPythonWheelTaskArgs', + 'JobTaskRunJobTaskArgs', 'JobTaskSparkJarTaskArgs', 'JobTaskSparkPythonTaskArgs', 'JobTaskSparkSubmitTaskArgs', @@ -358,10 +361,12 @@ 'GetJobJobSettingsSettingsNewClusterWorkloadTypeClientsArgs', 'GetJobJobSettingsSettingsNotebookTaskArgs', 'GetJobJobSettingsSettingsNotificationSettingsArgs', + 'GetJobJobSettingsSettingsParameterArgs', 'GetJobJobSettingsSettingsPipelineTaskArgs', 'GetJobJobSettingsSettingsPythonWheelTaskArgs', 'GetJobJobSettingsSettingsQueueArgs', 'GetJobJobSettingsSettingsRunAsArgs', + 'GetJobJobSettingsSettingsRunJobTaskArgs', 'GetJobJobSettingsSettingsScheduleArgs', 'GetJobJobSettingsSettingsSparkJarTaskArgs', 'GetJobJobSettingsSettingsSparkPythonTaskArgs', @@ -402,6 +407,7 @@ 'GetJobJobSettingsSettingsTaskNotificationSettingsArgs', 'GetJobJobSettingsSettingsTaskPipelineTaskArgs', 'GetJobJobSettingsSettingsTaskPythonWheelTaskArgs', + 'GetJobJobSettingsSettingsTaskRunJobTaskArgs', 'GetJobJobSettingsSettingsTaskSparkJarTaskArgs', 'GetJobJobSettingsSettingsTaskSparkPythonTaskArgs', 'GetJobJobSettingsSettingsTaskSparkSubmitTaskArgs', @@ -909,6 +915,7 @@ def __init__(__self__, *, availability: Optional[pulumi.Input[str]] = None, boot_disk_size: Optional[pulumi.Input[int]] = None, google_service_account: Optional[pulumi.Input[str]] = None, + local_ssd_count: Optional[pulumi.Input[int]] = None, use_preemptible_executors: Optional[pulumi.Input[bool]] = None, zone_id: Optional[pulumi.Input[str]] = None): if availability is not None: @@ -917,6 +924,11 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) + if use_preemptible_executors is not None: + warnings.warn("""Please use 'availability' instead.""", DeprecationWarning) + pulumi.log.warn("""use_preemptible_executors is deprecated: Please use 'availability' instead.""") if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -949,9 +961,21 @@ def google_service_account(self) -> Optional[pulumi.Input[str]]: def google_service_account(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "google_service_account", value) + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[pulumi.Input[int]]: + return pulumi.get(self, "local_ssd_count") + + @local_ssd_count.setter + def local_ssd_count(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "local_ssd_count", value) + @property 
@pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[pulumi.Input[bool]]: + warnings.warn("""Please use 'availability' instead.""", DeprecationWarning) + pulumi.log.warn("""use_preemptible_executors is deprecated: Please use 'availability' instead.""") + return pulumi.get(self, "use_preemptible_executors") @use_preemptible_executors.setter @@ -1523,7 +1547,7 @@ def __init__(__self__, *, availability: Optional[pulumi.Input[str]] = None, spot_bid_max_price: Optional[pulumi.Input[float]] = None): """ - :param pulumi.Input[str] availability: Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + :param pulumi.Input[str] availability: Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. :param pulumi.Input[float] spot_bid_max_price: The max price for Azure spot instances. Use `-1` to specify the lowest price. """ if availability is not None: @@ -1535,7 +1559,7 @@ def __init__(__self__, *, @pulumi.getter def availability(self) -> Optional[pulumi.Input[str]]: """ - Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. """ return pulumi.get(self, "availability") @@ -1639,19 +1663,41 @@ def ebs_volume_type(self, value: Optional[pulumi.Input[str]]): @pulumi.input_type class InstancePoolGcpAttributesArgs: def __init__(__self__, *, - gcp_availability: Optional[pulumi.Input[str]] = None): + gcp_availability: Optional[pulumi.Input[str]] = None, + local_ssd_count: Optional[pulumi.Input[int]] = None): + """ + :param pulumi.Input[str] gcp_availability: Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + :param pulumi.Input[int] local_ssd_count: Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + """ if gcp_availability is not None: pulumi.set(__self__, "gcp_availability", gcp_availability) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) @property @pulumi.getter(name="gcpAvailability") def gcp_availability(self) -> Optional[pulumi.Input[str]]: + """ + Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + """ return pulumi.get(self, "gcp_availability") @gcp_availability.setter def gcp_availability(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "gcp_availability", value) + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[pulumi.Input[int]]: + """ + Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. 
+ """ + return pulumi.get(self, "local_ssd_count") + + @local_ssd_count.setter + def local_ssd_count(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "local_ssd_count", value) + @pulumi.input_type class InstancePoolInstancePoolFleetAttributesArgs: @@ -3164,6 +3210,7 @@ def __init__(__self__, *, availability: Optional[pulumi.Input[str]] = None, boot_disk_size: Optional[pulumi.Input[int]] = None, google_service_account: Optional[pulumi.Input[str]] = None, + local_ssd_count: Optional[pulumi.Input[int]] = None, use_preemptible_executors: Optional[pulumi.Input[bool]] = None, zone_id: Optional[pulumi.Input[str]] = None): if availability is not None: @@ -3172,6 +3219,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -3204,6 +3253,15 @@ def google_service_account(self) -> Optional[pulumi.Input[str]]: def google_service_account(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "google_service_account", value) + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[pulumi.Input[int]]: + return pulumi.get(self, "local_ssd_count") + + @local_ssd_count.setter + def local_ssd_count(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "local_ssd_count", value) + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[pulumi.Input[bool]]: @@ -4584,6 +4642,7 @@ def __init__(__self__, *, availability: Optional[pulumi.Input[str]] = None, boot_disk_size: Optional[pulumi.Input[int]] = None, google_service_account: Optional[pulumi.Input[str]] = None, + local_ssd_count: Optional[pulumi.Input[int]] = None, use_preemptible_executors: Optional[pulumi.Input[bool]] = None, zone_id: Optional[pulumi.Input[str]] = None): if availability is not None: @@ -4592,6 +4651,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -4624,6 +4685,15 @@ def google_service_account(self) -> Optional[pulumi.Input[str]]: def google_service_account(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "google_service_account", value) + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[pulumi.Input[int]]: + return pulumi.get(self, "local_ssd_count") + + @local_ssd_count.setter + def local_ssd_count(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "local_ssd_count", value) + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[pulumi.Input[bool]]: @@ -5090,7 +5160,7 @@ def __init__(__self__, *, no_alert_for_skipped_runs: Optional[pulumi.Input[bool]] = None): """ :param pulumi.Input[bool] no_alert_for_canceled_runs: (Bool) don't send alert for cancelled runs. - :param pulumi.Input[bool] no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. 
(It's recommended to use the corresponding setting in the `notification_settings` configuration block). + :param pulumi.Input[bool] no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. """ if no_alert_for_canceled_runs is not None: pulumi.set(__self__, "no_alert_for_canceled_runs", no_alert_for_canceled_runs) @@ -5113,7 +5183,7 @@ def no_alert_for_canceled_runs(self, value: Optional[pulumi.Input[bool]]): @pulumi.getter(name="noAlertForSkippedRuns") def no_alert_for_skipped_runs(self) -> Optional[pulumi.Input[bool]]: """ - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + (Bool) don't send alert for skipped runs. """ return pulumi.get(self, "no_alert_for_skipped_runs") @@ -5122,6 +5192,45 @@ def no_alert_for_skipped_runs(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "no_alert_for_skipped_runs", value) +@pulumi.input_type +class JobParameterArgs: + def __init__(__self__, *, + default: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] default: Default value of the parameter. + :param pulumi.Input[str] name: An optional name for the job. The default value is Untitled. + """ + if default is not None: + pulumi.set(__self__, "default", default) + if name is not None: + pulumi.set(__self__, "name", name) + + @property + @pulumi.getter + def default(self) -> Optional[pulumi.Input[str]]: + """ + Default value of the parameter. + """ + return pulumi.get(self, "default") + + @default.setter + def default(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "default", value) + + @property + @pulumi.getter + def name(self) -> Optional[pulumi.Input[str]]: + """ + An optional name for the job. The default value is Untitled. 
+ """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "name", value) + + @pulumi.input_type class JobPipelineTaskArgs: def __init__(__self__, *, @@ -5302,6 +5411,44 @@ def user_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "user_name", value) +@pulumi.input_type +class JobRunJobTaskArgs: + def __init__(__self__, *, + job_id: pulumi.Input[str], + job_parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None): + """ + :param pulumi.Input[str] job_id: (String) ID of the job + :param pulumi.Input[Mapping[str, Any]] job_parameters: (Map) Job parameters for the task + """ + pulumi.set(__self__, "job_id", job_id) + if job_parameters is not None: + pulumi.set(__self__, "job_parameters", job_parameters) + + @property + @pulumi.getter(name="jobId") + def job_id(self) -> pulumi.Input[str]: + """ + (String) ID of the job + """ + return pulumi.get(self, "job_id") + + @job_id.setter + def job_id(self, value: pulumi.Input[str]): + pulumi.set(self, "job_id", value) + + @property + @pulumi.getter(name="jobParameters") + def job_parameters(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: + """ + (Map) Job parameters for the task + """ + return pulumi.get(self, "job_parameters") + + @job_parameters.setter + def job_parameters(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): + pulumi.set(self, "job_parameters", value) + + @pulumi.input_type class JobScheduleArgs: def __init__(__self__, *, @@ -5505,6 +5652,7 @@ def __init__(__self__, *, python_wheel_task: Optional[pulumi.Input['JobTaskPythonWheelTaskArgs']] = None, retry_on_timeout: Optional[pulumi.Input[bool]] = None, run_if: Optional[pulumi.Input[str]] = None, + run_job_task: Optional[pulumi.Input['JobTaskRunJobTaskArgs']] = None, spark_jar_task: Optional[pulumi.Input['JobTaskSparkJarTaskArgs']] = None, spark_python_task: Optional[pulumi.Input['JobTaskSparkPythonTaskArgs']] = None, spark_submit_task: Optional[pulumi.Input['JobTaskSparkSubmitTaskArgs']] = None, @@ -5522,6 +5670,7 @@ def __init__(__self__, *, :param pulumi.Input['JobTaskNewClusterArgs'] new_cluster: Same set of parameters as for Cluster resource. :param pulumi.Input['JobTaskNotificationSettingsArgs'] notification_settings: An optional block controlling the notification settings on the job level (described below). :param pulumi.Input[bool] retry_on_timeout: (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. + :param pulumi.Input[str] run_if: An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. :param pulumi.Input[str] task_key: string specifying an unique key for a given task. * `*_task` - (Required) one of the specific task blocks described below: :param pulumi.Input[int] timeout_seconds: (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout. 
@@ -5564,6 +5713,8 @@ def __init__(__self__, *, pulumi.set(__self__, "retry_on_timeout", retry_on_timeout) if run_if is not None: pulumi.set(__self__, "run_if", run_if) + if run_job_task is not None: + pulumi.set(__self__, "run_job_task", run_job_task) if spark_jar_task is not None: pulumi.set(__self__, "spark_jar_task", spark_jar_task) if spark_python_task is not None: @@ -5772,12 +5923,24 @@ def retry_on_timeout(self, value: Optional[pulumi.Input[bool]]): @property @pulumi.getter(name="runIf") def run_if(self) -> Optional[pulumi.Input[str]]: + """ + An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. + """ return pulumi.get(self, "run_if") @run_if.setter def run_if(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "run_if", value) + @property + @pulumi.getter(name="runJobTask") + def run_job_task(self) -> Optional[pulumi.Input['JobTaskRunJobTaskArgs']]: + return pulumi.get(self, "run_job_task") + + @run_job_task.setter + def run_job_task(self, value: Optional[pulumi.Input['JobTaskRunJobTaskArgs']]): + pulumi.set(self, "run_job_task", value) + @property @pulumi.getter(name="sparkJarTask") def spark_jar_task(self) -> Optional[pulumi.Input['JobTaskSparkJarTaskArgs']]: @@ -7182,6 +7345,7 @@ def __init__(__self__, *, availability: Optional[pulumi.Input[str]] = None, boot_disk_size: Optional[pulumi.Input[int]] = None, google_service_account: Optional[pulumi.Input[str]] = None, + local_ssd_count: Optional[pulumi.Input[int]] = None, use_preemptible_executors: Optional[pulumi.Input[bool]] = None, zone_id: Optional[pulumi.Input[str]] = None): if availability is not None: @@ -7190,6 +7354,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -7222,6 +7388,15 @@ def google_service_account(self) -> Optional[pulumi.Input[str]]: def google_service_account(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "google_service_account", value) + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[pulumi.Input[int]]: + return pulumi.get(self, "local_ssd_count") + + @local_ssd_count.setter + def local_ssd_count(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "local_ssd_count", value) + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[pulumi.Input[bool]]: @@ -7690,7 +7865,7 @@ def __init__(__self__, *, """ :param pulumi.Input[bool] alert_on_last_attempt: (Bool) do not send notifications to recipients specified in `on_start` for the retried runs and do not send notifications to recipients specified in `on_failure` until the last retry of the run. :param pulumi.Input[bool] no_alert_for_canceled_runs: (Bool) don't send alert for cancelled runs. - :param pulumi.Input[bool] no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + :param pulumi.Input[bool] no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. 
""" if alert_on_last_attempt is not None: pulumi.set(__self__, "alert_on_last_attempt", alert_on_last_attempt) @@ -7727,7 +7902,7 @@ def no_alert_for_canceled_runs(self, value: Optional[pulumi.Input[bool]]): @pulumi.getter(name="noAlertForSkippedRuns") def no_alert_for_skipped_runs(self) -> Optional[pulumi.Input[bool]]: """ - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + (Bool) don't send alert for skipped runs. """ return pulumi.get(self, "no_alert_for_skipped_runs") @@ -7849,6 +8024,44 @@ def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]) pulumi.set(self, "parameters", value) +@pulumi.input_type +class JobTaskRunJobTaskArgs: + def __init__(__self__, *, + job_id: pulumi.Input[str], + job_parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None): + """ + :param pulumi.Input[str] job_id: (String) ID of the job + :param pulumi.Input[Mapping[str, Any]] job_parameters: (Map) Job parameters for the task + """ + pulumi.set(__self__, "job_id", job_id) + if job_parameters is not None: + pulumi.set(__self__, "job_parameters", job_parameters) + + @property + @pulumi.getter(name="jobId") + def job_id(self) -> pulumi.Input[str]: + """ + (String) ID of the job + """ + return pulumi.get(self, "job_id") + + @job_id.setter + def job_id(self, value: pulumi.Input[str]): + pulumi.set(self, "job_id", value) + + @property + @pulumi.getter(name="jobParameters") + def job_parameters(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: + """ + (Map) Job parameters for the task + """ + return pulumi.get(self, "job_parameters") + + @job_parameters.setter + def job_parameters(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): + pulumi.set(self, "job_parameters", value) + + @pulumi.input_type class JobTaskSparkJarTaskArgs: def __init__(__self__, *, @@ -9140,6 +9353,7 @@ def __init__(__self__, *, model_version: pulumi.Input[str], workload_size: pulumi.Input[str], environment_vars: Optional[pulumi.Input[Mapping[str, Any]]] = None, + instance_profile_arn: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, scale_to_zero_enabled: Optional[pulumi.Input[bool]] = None): """ @@ -9154,6 +9368,8 @@ def __init__(__self__, *, pulumi.set(__self__, "workload_size", workload_size) if environment_vars is not None: pulumi.set(__self__, "environment_vars", environment_vars) + if instance_profile_arn is not None: + pulumi.set(__self__, "instance_profile_arn", instance_profile_arn) if name is not None: pulumi.set(__self__, "name", name) if scale_to_zero_enabled is not None: @@ -9204,6 +9420,15 @@ def environment_vars(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: def environment_vars(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): pulumi.set(self, "environment_vars", value) + @property + @pulumi.getter(name="instanceProfileArn") + def instance_profile_arn(self) -> Optional[pulumi.Input[str]]: + return pulumi.get(self, "instance_profile_arn") + + @instance_profile_arn.setter + def instance_profile_arn(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "instance_profile_arn", value) + @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: @@ -10746,11 +10971,14 @@ class PipelineClusterGcpAttributesArgs: def __init__(__self__, *, availability: Optional[pulumi.Input[str]] = None, google_service_account: Optional[pulumi.Input[str]] = None, + local_ssd_count: Optional[pulumi.Input[int]] = None, zone_id: Optional[pulumi.Input[str]] = 
None): if availability is not None: pulumi.set(__self__, "availability", availability) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if zone_id is not None: pulumi.set(__self__, "zone_id", zone_id) @@ -10772,6 +11000,15 @@ def google_service_account(self) -> Optional[pulumi.Input[str]]: def google_service_account(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "google_service_account", value) + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[pulumi.Input[int]]: + return pulumi.get(self, "local_ssd_count") + + @local_ssd_count.setter + def local_ssd_count(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "local_ssd_count", value) + @property @pulumi.getter(name="zoneId") def zone_id(self) -> Optional[pulumi.Input[str]]: @@ -14369,6 +14606,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -14377,6 +14615,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -14409,6 +14649,15 @@ def google_service_account(self) -> Optional[str]: def google_service_account(self, value: Optional[str]): pulumi.set(self, "google_service_account", value) + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + + @local_ssd_count.setter + def local_ssd_count(self, value: Optional[int]): + pulumi.set(self, "local_ssd_count", value) + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: @@ -15077,9 +15326,12 @@ def ebs_volume_type(self, value: Optional[str]): @pulumi.input_type class GetInstancePoolPoolInfoGcpAttributesArgs: def __init__(__self__, *, - gcp_availability: Optional[str] = None): + gcp_availability: Optional[str] = None, + local_ssd_count: Optional[int] = None): if gcp_availability is not None: pulumi.set(__self__, "gcp_availability", gcp_availability) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) @property @pulumi.getter(name="gcpAvailability") @@ -15090,6 +15342,15 @@ def gcp_availability(self) -> Optional[str]: def gcp_availability(self, value: Optional[str]): pulumi.set(self, "gcp_availability", value) + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + + @local_ssd_count.setter + def local_ssd_count(self, value: Optional[int]): + pulumi.set(self, "local_ssd_count", value) + @pulumi.input_type class GetInstancePoolPoolInfoInstancePoolFleetAttributeArgs: @@ -15406,11 +15667,13 @@ def __init__(__self__, *, new_cluster: Optional['GetJobJobSettingsSettingsNewClusterArgs'] = None, notebook_task: Optional['GetJobJobSettingsSettingsNotebookTaskArgs'] = None, notification_settings: 
Optional['GetJobJobSettingsSettingsNotificationSettingsArgs'] = None, + parameters: Optional[Sequence['GetJobJobSettingsSettingsParameterArgs']] = None, pipeline_task: Optional['GetJobJobSettingsSettingsPipelineTaskArgs'] = None, python_wheel_task: Optional['GetJobJobSettingsSettingsPythonWheelTaskArgs'] = None, queue: Optional['GetJobJobSettingsSettingsQueueArgs'] = None, retry_on_timeout: Optional[bool] = None, run_as: Optional['GetJobJobSettingsSettingsRunAsArgs'] = None, + run_job_task: Optional['GetJobJobSettingsSettingsRunJobTaskArgs'] = None, schedule: Optional['GetJobJobSettingsSettingsScheduleArgs'] = None, spark_jar_task: Optional['GetJobJobSettingsSettingsSparkJarTaskArgs'] = None, spark_python_task: Optional['GetJobJobSettingsSettingsSparkPythonTaskArgs'] = None, @@ -15456,6 +15719,8 @@ def __init__(__self__, *, pulumi.set(__self__, "notebook_task", notebook_task) if notification_settings is not None: pulumi.set(__self__, "notification_settings", notification_settings) + if parameters is not None: + pulumi.set(__self__, "parameters", parameters) if pipeline_task is not None: pulumi.set(__self__, "pipeline_task", pipeline_task) if python_wheel_task is not None: @@ -15466,6 +15731,8 @@ def __init__(__self__, *, pulumi.set(__self__, "retry_on_timeout", retry_on_timeout) if run_as is not None: pulumi.set(__self__, "run_as", run_as) + if run_job_task is not None: + pulumi.set(__self__, "run_job_task", run_job_task) if schedule is not None: pulumi.set(__self__, "schedule", schedule) if spark_jar_task is not None: @@ -15641,6 +15908,15 @@ def notification_settings(self) -> Optional['GetJobJobSettingsSettingsNotificati def notification_settings(self, value: Optional['GetJobJobSettingsSettingsNotificationSettingsArgs']): pulumi.set(self, "notification_settings", value) + @property + @pulumi.getter + def parameters(self) -> Optional[Sequence['GetJobJobSettingsSettingsParameterArgs']]: + return pulumi.get(self, "parameters") + + @parameters.setter + def parameters(self, value: Optional[Sequence['GetJobJobSettingsSettingsParameterArgs']]): + pulumi.set(self, "parameters", value) + @property @pulumi.getter(name="pipelineTask") def pipeline_task(self) -> Optional['GetJobJobSettingsSettingsPipelineTaskArgs']: @@ -15686,6 +15962,15 @@ def run_as(self) -> Optional['GetJobJobSettingsSettingsRunAsArgs']: def run_as(self, value: Optional['GetJobJobSettingsSettingsRunAsArgs']): pulumi.set(self, "run_as", value) + @property + @pulumi.getter(name="runJobTask") + def run_job_task(self) -> Optional['GetJobJobSettingsSettingsRunJobTaskArgs']: + return pulumi.get(self, "run_job_task") + + @run_job_task.setter + def run_job_task(self, value: Optional['GetJobJobSettingsSettingsRunJobTaskArgs']): + pulumi.set(self, "run_job_task", value) + @property @pulumi.getter def schedule(self) -> Optional['GetJobJobSettingsSettingsScheduleArgs']: @@ -16974,6 +17259,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -16982,6 +17268,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: 
pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -17014,6 +17302,15 @@ def google_service_account(self) -> Optional[str]: def google_service_account(self, value: Optional[str]): pulumi.set(self, "google_service_account", value) + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + + @local_ssd_count.setter + def local_ssd_count(self, value: Optional[int]): + pulumi.set(self, "local_ssd_count", value) + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: @@ -18290,6 +18587,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -18298,6 +18596,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -18330,6 +18630,15 @@ def google_service_account(self) -> Optional[str]: def google_service_account(self, value: Optional[str]): pulumi.set(self, "google_service_account", value) + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + + @local_ssd_count.setter + def local_ssd_count(self, value: Optional[int]): + pulumi.set(self, "local_ssd_count", value) + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: @@ -18712,6 +19021,41 @@ def no_alert_for_skipped_runs(self, value: Optional[bool]): pulumi.set(self, "no_alert_for_skipped_runs", value) +@pulumi.input_type +class GetJobJobSettingsSettingsParameterArgs: + def __init__(__self__, *, + default: Optional[str] = None, + name: Optional[str] = None): + """ + :param str name: the job name of Job if the resource was matched by id. + """ + if default is not None: + pulumi.set(__self__, "default", default) + if name is not None: + pulumi.set(__self__, "name", name) + + @property + @pulumi.getter + def default(self) -> Optional[str]: + return pulumi.get(self, "default") + + @default.setter + def default(self, value: Optional[str]): + pulumi.set(self, "default", value) + + @property + @pulumi.getter + def name(self) -> Optional[str]: + """ + the job name of Job if the resource was matched by id. 
+ """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[str]): + pulumi.set(self, "name", value) + + @pulumi.input_type class GetJobJobSettingsSettingsPipelineTaskArgs: def __init__(__self__, *, @@ -18828,6 +19172,34 @@ def user_name(self, value: Optional[str]): pulumi.set(self, "user_name", value) +@pulumi.input_type +class GetJobJobSettingsSettingsRunJobTaskArgs: + def __init__(__self__, *, + job_id: str, + job_parameters: Optional[Mapping[str, Any]] = None): + pulumi.set(__self__, "job_id", job_id) + if job_parameters is not None: + pulumi.set(__self__, "job_parameters", job_parameters) + + @property + @pulumi.getter(name="jobId") + def job_id(self) -> str: + return pulumi.get(self, "job_id") + + @job_id.setter + def job_id(self, value: str): + pulumi.set(self, "job_id", value) + + @property + @pulumi.getter(name="jobParameters") + def job_parameters(self) -> Optional[Mapping[str, Any]]: + return pulumi.get(self, "job_parameters") + + @job_parameters.setter + def job_parameters(self, value: Optional[Mapping[str, Any]]): + pulumi.set(self, "job_parameters", value) + + @pulumi.input_type class GetJobJobSettingsSettingsScheduleArgs: def __init__(__self__, *, @@ -18986,6 +19358,7 @@ def __init__(__self__, *, pipeline_task: Optional['GetJobJobSettingsSettingsTaskPipelineTaskArgs'] = None, python_wheel_task: Optional['GetJobJobSettingsSettingsTaskPythonWheelTaskArgs'] = None, run_if: Optional[str] = None, + run_job_task: Optional['GetJobJobSettingsSettingsTaskRunJobTaskArgs'] = None, spark_jar_task: Optional['GetJobJobSettingsSettingsTaskSparkJarTaskArgs'] = None, spark_python_task: Optional['GetJobJobSettingsSettingsTaskSparkPythonTaskArgs'] = None, spark_submit_task: Optional['GetJobJobSettingsSettingsTaskSparkSubmitTaskArgs'] = None, @@ -19029,6 +19402,8 @@ def __init__(__self__, *, pulumi.set(__self__, "python_wheel_task", python_wheel_task) if run_if is not None: pulumi.set(__self__, "run_if", run_if) + if run_job_task is not None: + pulumi.set(__self__, "run_job_task", run_job_task) if spark_jar_task is not None: pulumi.set(__self__, "spark_jar_task", spark_jar_task) if spark_python_task is not None: @@ -19213,6 +19588,15 @@ def run_if(self) -> Optional[str]: def run_if(self, value: Optional[str]): pulumi.set(self, "run_if", value) + @property + @pulumi.getter(name="runJobTask") + def run_job_task(self) -> Optional['GetJobJobSettingsSettingsTaskRunJobTaskArgs']: + return pulumi.get(self, "run_job_task") + + @run_job_task.setter + def run_job_task(self, value: Optional['GetJobJobSettingsSettingsTaskRunJobTaskArgs']): + pulumi.set(self, "run_job_task", value) + @property @pulumi.getter(name="sparkJarTask") def spark_jar_task(self) -> Optional['GetJobJobSettingsSettingsTaskSparkJarTaskArgs']: @@ -20510,6 +20894,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -20518,6 +20903,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ 
-20550,6 +20937,15 @@ def google_service_account(self) -> Optional[str]: def google_service_account(self, value: Optional[str]): pulumi.set(self, "google_service_account", value) + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + + @local_ssd_count.setter + def local_ssd_count(self, value: Optional[int]): + pulumi.set(self, "local_ssd_count", value) + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: @@ -21025,6 +21421,34 @@ def parameters(self, value: Optional[Sequence[str]]): pulumi.set(self, "parameters", value) +@pulumi.input_type +class GetJobJobSettingsSettingsTaskRunJobTaskArgs: + def __init__(__self__, *, + job_id: str, + job_parameters: Optional[Mapping[str, Any]] = None): + pulumi.set(__self__, "job_id", job_id) + if job_parameters is not None: + pulumi.set(__self__, "job_parameters", job_parameters) + + @property + @pulumi.getter(name="jobId") + def job_id(self) -> str: + return pulumi.get(self, "job_id") + + @job_id.setter + def job_id(self, value: str): + pulumi.set(self, "job_id", value) + + @property + @pulumi.getter(name="jobParameters") + def job_parameters(self) -> Optional[Mapping[str, Any]]: + return pulumi.get(self, "job_parameters") + + @job_parameters.setter + def job_parameters(self, value: Optional[Mapping[str, Any]]): + pulumi.set(self, "job_parameters", value) + + @pulumi.input_type class GetJobJobSettingsSettingsTaskSparkJarTaskArgs: def __init__(__self__, *, diff --git a/sdk/python/pulumi_databricks/job.py b/sdk/python/pulumi_databricks/job.py index 01d971c2..93869782 100644 --- a/sdk/python/pulumi_databricks/job.py +++ b/sdk/python/pulumi_databricks/job.py @@ -35,11 +35,13 @@ def __init__(__self__, *, new_cluster: Optional[pulumi.Input['JobNewClusterArgs']] = None, notebook_task: Optional[pulumi.Input['JobNotebookTaskArgs']] = None, notification_settings: Optional[pulumi.Input['JobNotificationSettingsArgs']] = None, + parameters: Optional[pulumi.Input[Sequence[pulumi.Input['JobParameterArgs']]]] = None, pipeline_task: Optional[pulumi.Input['JobPipelineTaskArgs']] = None, python_wheel_task: Optional[pulumi.Input['JobPythonWheelTaskArgs']] = None, queue: Optional[pulumi.Input['JobQueueArgs']] = None, retry_on_timeout: Optional[pulumi.Input[bool]] = None, run_as: Optional[pulumi.Input['JobRunAsArgs']] = None, + run_job_task: Optional[pulumi.Input['JobRunJobTaskArgs']] = None, schedule: Optional[pulumi.Input['JobScheduleArgs']] = None, spark_jar_task: Optional[pulumi.Input['JobSparkJarTaskArgs']] = None, spark_python_task: Optional[pulumi.Input['JobSparkPythonTaskArgs']] = None, @@ -114,6 +116,8 @@ def __init__(__self__, *, pulumi.set(__self__, "notebook_task", notebook_task) if notification_settings is not None: pulumi.set(__self__, "notification_settings", notification_settings) + if parameters is not None: + pulumi.set(__self__, "parameters", parameters) if pipeline_task is not None: pulumi.set(__self__, "pipeline_task", pipeline_task) if python_wheel_task is not None: @@ -124,6 +128,8 @@ def __init__(__self__, *, pulumi.set(__self__, "retry_on_timeout", retry_on_timeout) if run_as is not None: pulumi.set(__self__, "run_as", run_as) + if run_job_task is not None: + pulumi.set(__self__, "run_job_task", run_job_task) if schedule is not None: pulumi.set(__self__, "schedule", schedule) if spark_jar_task is not None: @@ -358,6 +364,15 @@ def notification_settings(self) -> 
Optional[pulumi.Input['JobNotificationSetting def notification_settings(self, value: Optional[pulumi.Input['JobNotificationSettingsArgs']]): pulumi.set(self, "notification_settings", value) + @property + @pulumi.getter + def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JobParameterArgs']]]]: + return pulumi.get(self, "parameters") + + @parameters.setter + def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JobParameterArgs']]]]): + pulumi.set(self, "parameters", value) + @property @pulumi.getter(name="pipelineTask") def pipeline_task(self) -> Optional[pulumi.Input['JobPipelineTaskArgs']]: @@ -406,6 +421,15 @@ def run_as(self) -> Optional[pulumi.Input['JobRunAsArgs']]: def run_as(self, value: Optional[pulumi.Input['JobRunAsArgs']]): pulumi.set(self, "run_as", value) + @property + @pulumi.getter(name="runJobTask") + def run_job_task(self) -> Optional[pulumi.Input['JobRunJobTaskArgs']]: + return pulumi.get(self, "run_job_task") + + @run_job_task.setter + def run_job_task(self, value: Optional[pulumi.Input['JobRunJobTaskArgs']]): + pulumi.set(self, "run_job_task", value) + @property @pulumi.getter def schedule(self) -> Optional[pulumi.Input['JobScheduleArgs']]: @@ -519,11 +543,13 @@ def __init__(__self__, *, new_cluster: Optional[pulumi.Input['JobNewClusterArgs']] = None, notebook_task: Optional[pulumi.Input['JobNotebookTaskArgs']] = None, notification_settings: Optional[pulumi.Input['JobNotificationSettingsArgs']] = None, + parameters: Optional[pulumi.Input[Sequence[pulumi.Input['JobParameterArgs']]]] = None, pipeline_task: Optional[pulumi.Input['JobPipelineTaskArgs']] = None, python_wheel_task: Optional[pulumi.Input['JobPythonWheelTaskArgs']] = None, queue: Optional[pulumi.Input['JobQueueArgs']] = None, retry_on_timeout: Optional[pulumi.Input[bool]] = None, run_as: Optional[pulumi.Input['JobRunAsArgs']] = None, + run_job_task: Optional[pulumi.Input['JobRunJobTaskArgs']] = None, schedule: Optional[pulumi.Input['JobScheduleArgs']] = None, spark_jar_task: Optional[pulumi.Input['JobSparkJarTaskArgs']] = None, spark_python_task: Optional[pulumi.Input['JobSparkPythonTaskArgs']] = None, @@ -600,6 +626,8 @@ def __init__(__self__, *, pulumi.set(__self__, "notebook_task", notebook_task) if notification_settings is not None: pulumi.set(__self__, "notification_settings", notification_settings) + if parameters is not None: + pulumi.set(__self__, "parameters", parameters) if pipeline_task is not None: pulumi.set(__self__, "pipeline_task", pipeline_task) if python_wheel_task is not None: @@ -610,6 +638,8 @@ def __init__(__self__, *, pulumi.set(__self__, "retry_on_timeout", retry_on_timeout) if run_as is not None: pulumi.set(__self__, "run_as", run_as) + if run_job_task is not None: + pulumi.set(__self__, "run_job_task", run_job_task) if schedule is not None: pulumi.set(__self__, "schedule", schedule) if spark_jar_task is not None: @@ -846,6 +876,15 @@ def notification_settings(self) -> Optional[pulumi.Input['JobNotificationSetting def notification_settings(self, value: Optional[pulumi.Input['JobNotificationSettingsArgs']]): pulumi.set(self, "notification_settings", value) + @property + @pulumi.getter + def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['JobParameterArgs']]]]: + return pulumi.get(self, "parameters") + + @parameters.setter + def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['JobParameterArgs']]]]): + pulumi.set(self, "parameters", value) + @property @pulumi.getter(name="pipelineTask") def pipeline_task(self) -> 
Optional[pulumi.Input['JobPipelineTaskArgs']]: @@ -894,6 +933,15 @@ def run_as(self) -> Optional[pulumi.Input['JobRunAsArgs']]: def run_as(self, value: Optional[pulumi.Input['JobRunAsArgs']]): pulumi.set(self, "run_as", value) + @property + @pulumi.getter(name="runJobTask") + def run_job_task(self) -> Optional[pulumi.Input['JobRunJobTaskArgs']]: + return pulumi.get(self, "run_job_task") + + @run_job_task.setter + def run_job_task(self, value: Optional[pulumi.Input['JobRunJobTaskArgs']]): + pulumi.set(self, "run_job_task", value) + @property @pulumi.getter def schedule(self) -> Optional[pulumi.Input['JobScheduleArgs']]: @@ -1021,11 +1069,13 @@ def __init__(__self__, new_cluster: Optional[pulumi.Input[pulumi.InputType['JobNewClusterArgs']]] = None, notebook_task: Optional[pulumi.Input[pulumi.InputType['JobNotebookTaskArgs']]] = None, notification_settings: Optional[pulumi.Input[pulumi.InputType['JobNotificationSettingsArgs']]] = None, + parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['JobParameterArgs']]]]] = None, pipeline_task: Optional[pulumi.Input[pulumi.InputType['JobPipelineTaskArgs']]] = None, python_wheel_task: Optional[pulumi.Input[pulumi.InputType['JobPythonWheelTaskArgs']]] = None, queue: Optional[pulumi.Input[pulumi.InputType['JobQueueArgs']]] = None, retry_on_timeout: Optional[pulumi.Input[bool]] = None, run_as: Optional[pulumi.Input[pulumi.InputType['JobRunAsArgs']]] = None, + run_job_task: Optional[pulumi.Input[pulumi.InputType['JobRunJobTaskArgs']]] = None, schedule: Optional[pulumi.Input[pulumi.InputType['JobScheduleArgs']]] = None, spark_jar_task: Optional[pulumi.Input[pulumi.InputType['JobSparkJarTaskArgs']]] = None, spark_python_task: Optional[pulumi.Input[pulumi.InputType['JobSparkPythonTaskArgs']]] = None, @@ -1118,11 +1168,13 @@ def _internal_init(__self__, new_cluster: Optional[pulumi.Input[pulumi.InputType['JobNewClusterArgs']]] = None, notebook_task: Optional[pulumi.Input[pulumi.InputType['JobNotebookTaskArgs']]] = None, notification_settings: Optional[pulumi.Input[pulumi.InputType['JobNotificationSettingsArgs']]] = None, + parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['JobParameterArgs']]]]] = None, pipeline_task: Optional[pulumi.Input[pulumi.InputType['JobPipelineTaskArgs']]] = None, python_wheel_task: Optional[pulumi.Input[pulumi.InputType['JobPythonWheelTaskArgs']]] = None, queue: Optional[pulumi.Input[pulumi.InputType['JobQueueArgs']]] = None, retry_on_timeout: Optional[pulumi.Input[bool]] = None, run_as: Optional[pulumi.Input[pulumi.InputType['JobRunAsArgs']]] = None, + run_job_task: Optional[pulumi.Input[pulumi.InputType['JobRunJobTaskArgs']]] = None, schedule: Optional[pulumi.Input[pulumi.InputType['JobScheduleArgs']]] = None, spark_jar_task: Optional[pulumi.Input[pulumi.InputType['JobSparkJarTaskArgs']]] = None, spark_python_task: Optional[pulumi.Input[pulumi.InputType['JobSparkPythonTaskArgs']]] = None, @@ -1163,11 +1215,13 @@ def _internal_init(__self__, __props__.__dict__["new_cluster"] = new_cluster __props__.__dict__["notebook_task"] = notebook_task __props__.__dict__["notification_settings"] = notification_settings + __props__.__dict__["parameters"] = parameters __props__.__dict__["pipeline_task"] = pipeline_task __props__.__dict__["python_wheel_task"] = python_wheel_task __props__.__dict__["queue"] = queue __props__.__dict__["retry_on_timeout"] = retry_on_timeout __props__.__dict__["run_as"] = run_as + __props__.__dict__["run_job_task"] = run_job_task __props__.__dict__["schedule"] = schedule 
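With `parameters` and `run_job_task` now carried through `JobArgs`, `_JobState`, `Job._internal_init`, and `Job.get`, job-level parameters could be declared roughly as in the sketch below; the notebook path, parameter names, and defaults are invented for illustration, and the task's cluster configuration is assumed to be supplied elsewhere:

import pulumi_databricks as databricks

parameterized = databricks.Job(
    "parameterized",  # placeholder resource name
    name="parameterized-job",
    parameters=[
        # Job-level parameters: each entry pairs a name with a default value.
        databricks.JobParameterArgs(name="environment", default="dev"),
        databricks.JobParameterArgs(name="run_date", default="1970-01-01"),
    ],
    tasks=[
        databricks.JobTaskArgs(
            task_key="main",
            notebook_task=databricks.JobTaskNotebookTaskArgs(
                notebook_path="/Shared/example",  # placeholder notebook path
            ),
        ),
    ],
)

The defaults declared here are what a caller, or the `job_parameters` map on a `run_job_task`, would typically override at run time.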
__props__.__dict__["spark_jar_task"] = spark_jar_task __props__.__dict__["spark_python_task"] = spark_python_task @@ -1207,11 +1261,13 @@ def get(resource_name: str, new_cluster: Optional[pulumi.Input[pulumi.InputType['JobNewClusterArgs']]] = None, notebook_task: Optional[pulumi.Input[pulumi.InputType['JobNotebookTaskArgs']]] = None, notification_settings: Optional[pulumi.Input[pulumi.InputType['JobNotificationSettingsArgs']]] = None, + parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['JobParameterArgs']]]]] = None, pipeline_task: Optional[pulumi.Input[pulumi.InputType['JobPipelineTaskArgs']]] = None, python_wheel_task: Optional[pulumi.Input[pulumi.InputType['JobPythonWheelTaskArgs']]] = None, queue: Optional[pulumi.Input[pulumi.InputType['JobQueueArgs']]] = None, retry_on_timeout: Optional[pulumi.Input[bool]] = None, run_as: Optional[pulumi.Input[pulumi.InputType['JobRunAsArgs']]] = None, + run_job_task: Optional[pulumi.Input[pulumi.InputType['JobRunJobTaskArgs']]] = None, schedule: Optional[pulumi.Input[pulumi.InputType['JobScheduleArgs']]] = None, spark_jar_task: Optional[pulumi.Input[pulumi.InputType['JobSparkJarTaskArgs']]] = None, spark_python_task: Optional[pulumi.Input[pulumi.InputType['JobSparkPythonTaskArgs']]] = None, @@ -1275,11 +1331,13 @@ def get(resource_name: str, __props__.__dict__["new_cluster"] = new_cluster __props__.__dict__["notebook_task"] = notebook_task __props__.__dict__["notification_settings"] = notification_settings + __props__.__dict__["parameters"] = parameters __props__.__dict__["pipeline_task"] = pipeline_task __props__.__dict__["python_wheel_task"] = python_wheel_task __props__.__dict__["queue"] = queue __props__.__dict__["retry_on_timeout"] = retry_on_timeout __props__.__dict__["run_as"] = run_as + __props__.__dict__["run_job_task"] = run_job_task __props__.__dict__["schedule"] = schedule __props__.__dict__["spark_jar_task"] = spark_jar_task __props__.__dict__["spark_python_task"] = spark_python_task @@ -1431,6 +1489,11 @@ def notification_settings(self) -> pulumi.Output[Optional['outputs.JobNotificati """ return pulumi.get(self, "notification_settings") + @property + @pulumi.getter + def parameters(self) -> pulumi.Output[Optional[Sequence['outputs.JobParameter']]]: + return pulumi.get(self, "parameters") + @property @pulumi.getter(name="pipelineTask") def pipeline_task(self) -> pulumi.Output[Optional['outputs.JobPipelineTask']]: @@ -1459,6 +1522,11 @@ def retry_on_timeout(self) -> pulumi.Output[Optional[bool]]: def run_as(self) -> pulumi.Output[Optional['outputs.JobRunAs']]: return pulumi.get(self, "run_as") + @property + @pulumi.getter(name="runJobTask") + def run_job_task(self) -> pulumi.Output[Optional['outputs.JobRunJobTask']]: + return pulumi.get(self, "run_job_task") + @property @pulumi.getter def schedule(self) -> pulumi.Output[Optional['outputs.JobSchedule']]: diff --git a/sdk/python/pulumi_databricks/outputs.py b/sdk/python/pulumi_databricks/outputs.py index 8a0fd4d2..7a2fba70 100644 --- a/sdk/python/pulumi_databricks/outputs.py +++ b/sdk/python/pulumi_databricks/outputs.py @@ -106,10 +106,12 @@ 'JobNewClusterWorkloadTypeClients', 'JobNotebookTask', 'JobNotificationSettings', + 'JobParameter', 'JobPipelineTask', 'JobPythonWheelTask', 'JobQueue', 'JobRunAs', + 'JobRunJobTask', 'JobSchedule', 'JobSparkJarTask', 'JobSparkPythonTask', @@ -150,6 +152,7 @@ 'JobTaskNotificationSettings', 'JobTaskPipelineTask', 'JobTaskPythonWheelTask', + 'JobTaskRunJobTask', 'JobTaskSparkJarTask', 'JobTaskSparkPythonTask', 
'JobTaskSparkSubmitTask', @@ -360,10 +363,12 @@ 'GetJobJobSettingsSettingsNewClusterWorkloadTypeClientsResult', 'GetJobJobSettingsSettingsNotebookTaskResult', 'GetJobJobSettingsSettingsNotificationSettingsResult', + 'GetJobJobSettingsSettingsParameterResult', 'GetJobJobSettingsSettingsPipelineTaskResult', 'GetJobJobSettingsSettingsPythonWheelTaskResult', 'GetJobJobSettingsSettingsQueueResult', 'GetJobJobSettingsSettingsRunAsResult', + 'GetJobJobSettingsSettingsRunJobTaskResult', 'GetJobJobSettingsSettingsScheduleResult', 'GetJobJobSettingsSettingsSparkJarTaskResult', 'GetJobJobSettingsSettingsSparkPythonTaskResult', @@ -404,6 +409,7 @@ 'GetJobJobSettingsSettingsTaskNotificationSettingsResult', 'GetJobJobSettingsSettingsTaskPipelineTaskResult', 'GetJobJobSettingsSettingsTaskPythonWheelTaskResult', + 'GetJobJobSettingsSettingsTaskRunJobTaskResult', 'GetJobJobSettingsSettingsTaskSparkJarTaskResult', 'GetJobJobSettingsSettingsTaskSparkPythonTaskResult', 'GetJobJobSettingsSettingsTaskSparkSubmitTaskResult', @@ -926,6 +932,8 @@ def __key_warning(key: str): suggest = "boot_disk_size" elif key == "googleServiceAccount": suggest = "google_service_account" + elif key == "localSsdCount": + suggest = "local_ssd_count" elif key == "usePreemptibleExecutors": suggest = "use_preemptible_executors" elif key == "zoneId": @@ -946,6 +954,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -954,6 +963,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -974,9 +985,17 @@ def boot_disk_size(self) -> Optional[int]: def google_service_account(self) -> Optional[str]: return pulumi.get(self, "google_service_account") + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: + warnings.warn("""Please use 'availability' instead.""", DeprecationWarning) + pulumi.log.warn("""use_preemptible_executors is deprecated: Please use 'availability' instead.""") + return pulumi.get(self, "use_preemptible_executors") @property @@ -1443,7 +1462,7 @@ def __init__(__self__, *, availability: Optional[str] = None, spot_bid_max_price: Optional[float] = None): """ - :param str availability: Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + :param str availability: Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. :param float spot_bid_max_price: The max price for Azure spot instances. Use `-1` to specify the lowest price. """ if availability is not None: @@ -1455,7 +1474,7 @@ def __init__(__self__, *, @pulumi.getter def availability(self) -> Optional[str]: """ - Availability type used for all nodes. 
Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + Availability type used for all nodes. Valid values are `SPOT_AZURE` and `ON_DEMAND_AZURE`. """ return pulumi.get(self, "availability") @@ -1575,6 +1594,8 @@ def __key_warning(key: str): suggest = None if key == "gcpAvailability": suggest = "gcp_availability" + elif key == "localSsdCount": + suggest = "local_ssd_count" if suggest: pulumi.log.warn(f"Key '{key}' not found in InstancePoolGcpAttributes. Access the value via the '{suggest}' property getter instead.") @@ -1588,15 +1609,33 @@ def get(self, key: str, default = None) -> Any: return super().get(key, default) def __init__(__self__, *, - gcp_availability: Optional[str] = None): + gcp_availability: Optional[str] = None, + local_ssd_count: Optional[int] = None): + """ + :param str gcp_availability: Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + :param int local_ssd_count: Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. + """ if gcp_availability is not None: pulumi.set(__self__, "gcp_availability", gcp_availability) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) @property @pulumi.getter(name="gcpAvailability") def gcp_availability(self) -> Optional[str]: + """ + Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. + """ return pulumi.get(self, "gcp_availability") + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + """ + Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. 
+ """ + return pulumi.get(self, "local_ssd_count") + @pulumi.output_type class InstancePoolInstancePoolFleetAttributes(dict): @@ -3142,6 +3181,8 @@ def __key_warning(key: str): suggest = "boot_disk_size" elif key == "googleServiceAccount": suggest = "google_service_account" + elif key == "localSsdCount": + suggest = "local_ssd_count" elif key == "usePreemptibleExecutors": suggest = "use_preemptible_executors" elif key == "zoneId": @@ -3162,6 +3203,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -3170,6 +3212,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -3190,6 +3234,11 @@ def boot_disk_size(self) -> Optional[int]: def google_service_account(self) -> Optional[str]: return pulumi.get(self, "google_service_account") + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: @@ -4424,6 +4473,8 @@ def __key_warning(key: str): suggest = "boot_disk_size" elif key == "googleServiceAccount": suggest = "google_service_account" + elif key == "localSsdCount": + suggest = "local_ssd_count" elif key == "usePreemptibleExecutors": suggest = "use_preemptible_executors" elif key == "zoneId": @@ -4444,6 +4495,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -4452,6 +4504,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -4472,6 +4526,11 @@ def boot_disk_size(self) -> Optional[int]: def google_service_account(self) -> Optional[str]: return pulumi.get(self, "google_service_account") + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: @@ -4895,7 +4954,7 @@ def __init__(__self__, *, no_alert_for_skipped_runs: Optional[bool] = None): """ :param bool no_alert_for_canceled_runs: (Bool) don't send alert for cancelled runs. - :param bool no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). 
+ :param bool no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. """ if no_alert_for_canceled_runs is not None: pulumi.set(__self__, "no_alert_for_canceled_runs", no_alert_for_canceled_runs) @@ -4914,11 +4973,42 @@ def no_alert_for_canceled_runs(self) -> Optional[bool]: @pulumi.getter(name="noAlertForSkippedRuns") def no_alert_for_skipped_runs(self) -> Optional[bool]: """ - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + (Bool) don't send alert for skipped runs. """ return pulumi.get(self, "no_alert_for_skipped_runs") +@pulumi.output_type +class JobParameter(dict): + def __init__(__self__, *, + default: Optional[str] = None, + name: Optional[str] = None): + """ + :param str default: Default value of the parameter. + :param str name: An optional name for the job. The default value is Untitled. + """ + if default is not None: + pulumi.set(__self__, "default", default) + if name is not None: + pulumi.set(__self__, "name", name) + + @property + @pulumi.getter + def default(self) -> Optional[str]: + """ + Default value of the parameter. + """ + return pulumi.get(self, "default") + + @property + @pulumi.getter + def name(self) -> Optional[str]: + """ + An optional name for the job. The default value is Untitled. + """ + return pulumi.get(self, "name") + + @pulumi.output_type class JobPipelineTask(dict): @staticmethod @@ -5126,6 +5216,55 @@ def user_name(self) -> Optional[str]: return pulumi.get(self, "user_name") +@pulumi.output_type +class JobRunJobTask(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "jobId": + suggest = "job_id" + elif key == "jobParameters": + suggest = "job_parameters" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in JobRunJobTask. 
Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + JobRunJobTask.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + JobRunJobTask.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + job_id: str, + job_parameters: Optional[Mapping[str, Any]] = None): + """ + :param str job_id: (String) ID of the job + :param Mapping[str, Any] job_parameters: (Map) Job parameters for the task + """ + pulumi.set(__self__, "job_id", job_id) + if job_parameters is not None: + pulumi.set(__self__, "job_parameters", job_parameters) + + @property + @pulumi.getter(name="jobId") + def job_id(self) -> str: + """ + (String) ID of the job + """ + return pulumi.get(self, "job_id") + + @property + @pulumi.getter(name="jobParameters") + def job_parameters(self) -> Optional[Mapping[str, Any]]: + """ + (Map) Job parameters for the task + """ + return pulumi.get(self, "job_parameters") + + @pulumi.output_type class JobSchedule(dict): @staticmethod @@ -5361,6 +5500,8 @@ def __key_warning(key: str): suggest = "retry_on_timeout" elif key == "runIf": suggest = "run_if" + elif key == "runJobTask": + suggest = "run_job_task" elif key == "sparkJarTask": suggest = "spark_jar_task" elif key == "sparkPythonTask": @@ -5405,6 +5546,7 @@ def __init__(__self__, *, python_wheel_task: Optional['outputs.JobTaskPythonWheelTask'] = None, retry_on_timeout: Optional[bool] = None, run_if: Optional[str] = None, + run_job_task: Optional['outputs.JobTaskRunJobTask'] = None, spark_jar_task: Optional['outputs.JobTaskSparkJarTask'] = None, spark_python_task: Optional['outputs.JobTaskSparkPythonTask'] = None, spark_submit_task: Optional['outputs.JobTaskSparkSubmitTask'] = None, @@ -5422,6 +5564,7 @@ def __init__(__self__, *, :param 'JobTaskNewClusterArgs' new_cluster: Same set of parameters as for Cluster resource. :param 'JobTaskNotificationSettingsArgs' notification_settings: An optional block controlling the notification settings on the job level (described below). :param bool retry_on_timeout: (Bool) An optional policy to specify whether to retry a job when it times out. The default behavior is to not retry on timeout. + :param str run_if: An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. :param str task_key: string specifying an unique key for a given task. * `*_task` - (Required) one of the specific task blocks described below: :param int timeout_seconds: (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout. @@ -5464,6 +5607,8 @@ def __init__(__self__, *, pulumi.set(__self__, "retry_on_timeout", retry_on_timeout) if run_if is not None: pulumi.set(__self__, "run_if", run_if) + if run_job_task is not None: + pulumi.set(__self__, "run_job_task", run_job_task) if spark_jar_task is not None: pulumi.set(__self__, "spark_jar_task", spark_jar_task) if spark_python_task is not None: @@ -5600,8 +5745,16 @@ def retry_on_timeout(self) -> Optional[bool]: @property @pulumi.getter(name="runIf") def run_if(self) -> Optional[str]: + """ + An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. 
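The new `run_job_task` block and the `run_if` condition documented above can be combined on a task. A minimal sketch (not part of this diff), assuming the conventional `JobTaskArgs` and `JobTaskRunJobTaskArgs` input classes generated alongside the output types in this change; the job ID and parameters are placeholders:

```python
import pulumi_databricks as databricks

orchestrator = databricks.Job(
    "orchestrator",
    tasks=[
        databricks.JobTaskArgs(
            task_key="trigger_downstream",
            run_if="ALL_SUCCESS",  # the documented default when omitted
            run_job_task=databricks.JobTaskRunJobTaskArgs(
                job_id="123",                       # placeholder ID of the job to run
                job_parameters={"env": "staging"},  # placeholder job parameters
            ),
        ),
    ],
)
```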
+ """ return pulumi.get(self, "run_if") + @property + @pulumi.getter(name="runJobTask") + def run_job_task(self) -> Optional['outputs.JobTaskRunJobTask']: + return pulumi.get(self, "run_job_task") + @property @pulumi.getter(name="sparkJarTask") def spark_jar_task(self) -> Optional['outputs.JobTaskSparkJarTask']: @@ -6886,6 +7039,8 @@ def __key_warning(key: str): suggest = "boot_disk_size" elif key == "googleServiceAccount": suggest = "google_service_account" + elif key == "localSsdCount": + suggest = "local_ssd_count" elif key == "usePreemptibleExecutors": suggest = "use_preemptible_executors" elif key == "zoneId": @@ -6906,6 +7061,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -6914,6 +7070,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -6934,6 +7092,11 @@ def boot_disk_size(self) -> Optional[int]: def google_service_account(self) -> Optional[str]: return pulumi.get(self, "google_service_account") + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: @@ -7361,7 +7524,7 @@ def __init__(__self__, *, """ :param bool alert_on_last_attempt: (Bool) do not send notifications to recipients specified in `on_start` for the retried runs and do not send notifications to recipients specified in `on_failure` until the last retry of the run. :param bool no_alert_for_canceled_runs: (Bool) don't send alert for cancelled runs. - :param bool no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + :param bool no_alert_for_skipped_runs: (Bool) don't send alert for skipped runs. """ if alert_on_last_attempt is not None: pulumi.set(__self__, "alert_on_last_attempt", alert_on_last_attempt) @@ -7390,7 +7553,7 @@ def no_alert_for_canceled_runs(self) -> Optional[bool]: @pulumi.getter(name="noAlertForSkippedRuns") def no_alert_for_skipped_runs(self) -> Optional[bool]: """ - (Bool) don't send alert for skipped runs. (It's recommended to use the corresponding setting in the `notification_settings` configuration block). + (Bool) don't send alert for skipped runs. """ return pulumi.get(self, "no_alert_for_skipped_runs") @@ -7524,6 +7687,55 @@ def parameters(self) -> Optional[Sequence[str]]: return pulumi.get(self, "parameters") +@pulumi.output_type +class JobTaskRunJobTask(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "jobId": + suggest = "job_id" + elif key == "jobParameters": + suggest = "job_parameters" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in JobTaskRunJobTask. 
Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + JobTaskRunJobTask.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + JobTaskRunJobTask.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + job_id: str, + job_parameters: Optional[Mapping[str, Any]] = None): + """ + :param str job_id: (String) ID of the job + :param Mapping[str, Any] job_parameters: (Map) Job parameters for the task + """ + pulumi.set(__self__, "job_id", job_id) + if job_parameters is not None: + pulumi.set(__self__, "job_parameters", job_parameters) + + @property + @pulumi.getter(name="jobId") + def job_id(self) -> str: + """ + (String) ID of the job + """ + return pulumi.get(self, "job_id") + + @property + @pulumi.getter(name="jobParameters") + def job_parameters(self) -> Optional[Mapping[str, Any]]: + """ + (Map) Job parameters for the task + """ + return pulumi.get(self, "job_parameters") + + @pulumi.output_type class JobTaskSparkJarTask(dict): @staticmethod @@ -8897,6 +9109,8 @@ def __key_warning(key: str): suggest = "workload_size" elif key == "environmentVars": suggest = "environment_vars" + elif key == "instanceProfileArn": + suggest = "instance_profile_arn" elif key == "scaleToZeroEnabled": suggest = "scale_to_zero_enabled" @@ -8916,6 +9130,7 @@ def __init__(__self__, *, model_version: str, workload_size: str, environment_vars: Optional[Mapping[str, Any]] = None, + instance_profile_arn: Optional[str] = None, name: Optional[str] = None, scale_to_zero_enabled: Optional[bool] = None): """ @@ -8930,6 +9145,8 @@ def __init__(__self__, *, pulumi.set(__self__, "workload_size", workload_size) if environment_vars is not None: pulumi.set(__self__, "environment_vars", environment_vars) + if instance_profile_arn is not None: + pulumi.set(__self__, "instance_profile_arn", instance_profile_arn) if name is not None: pulumi.set(__self__, "name", name) if scale_to_zero_enabled is not None: @@ -8964,6 +9181,11 @@ def workload_size(self) -> str: def environment_vars(self) -> Optional[Mapping[str, Any]]: return pulumi.get(self, "environment_vars") + @property + @pulumi.getter(name="instanceProfileArn") + def instance_profile_arn(self) -> Optional[str]: + return pulumi.get(self, "instance_profile_arn") + @property @pulumi.getter def name(self) -> Optional[str]: @@ -10595,6 +10817,8 @@ def __key_warning(key: str): suggest = None if key == "googleServiceAccount": suggest = "google_service_account" + elif key == "localSsdCount": + suggest = "local_ssd_count" elif key == "zoneId": suggest = "zone_id" @@ -10612,11 +10836,14 @@ def get(self, key: str, default = None) -> Any: def __init__(__self__, *, availability: Optional[str] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, zone_id: Optional[str] = None): if availability is not None: pulumi.set(__self__, "availability", availability) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if zone_id is not None: pulumi.set(__self__, "zone_id", zone_id) @@ -10630,6 +10857,11 @@ def availability(self) -> Optional[str]: def google_service_account(self) -> Optional[str]: return pulumi.get(self, "google_service_account") + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, 
"local_ssd_count") + @property @pulumi.getter(name="zoneId") def zone_id(self) -> Optional[str]: @@ -13638,6 +13870,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -13646,6 +13879,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -13666,6 +13901,11 @@ def boot_disk_size(self) -> Optional[int]: def google_service_account(self) -> Optional[str]: return pulumi.get(self, "google_service_account") + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: @@ -14157,15 +14397,23 @@ def ebs_volume_type(self) -> Optional[str]: @pulumi.output_type class GetInstancePoolPoolInfoGcpAttributesResult(dict): def __init__(__self__, *, - gcp_availability: Optional[str] = None): + gcp_availability: Optional[str] = None, + local_ssd_count: Optional[int] = None): if gcp_availability is not None: pulumi.set(__self__, "gcp_availability", gcp_availability) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) @property @pulumi.getter(name="gcpAvailability") def gcp_availability(self) -> Optional[str]: return pulumi.get(self, "gcp_availability") + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + @pulumi.output_type class GetInstancePoolPoolInfoInstancePoolFleetAttributeResult(dict): @@ -14394,11 +14642,13 @@ def __init__(__self__, *, new_cluster: Optional['outputs.GetJobJobSettingsSettingsNewClusterResult'] = None, notebook_task: Optional['outputs.GetJobJobSettingsSettingsNotebookTaskResult'] = None, notification_settings: Optional['outputs.GetJobJobSettingsSettingsNotificationSettingsResult'] = None, + parameters: Optional[Sequence['outputs.GetJobJobSettingsSettingsParameterResult']] = None, pipeline_task: Optional['outputs.GetJobJobSettingsSettingsPipelineTaskResult'] = None, python_wheel_task: Optional['outputs.GetJobJobSettingsSettingsPythonWheelTaskResult'] = None, queue: Optional['outputs.GetJobJobSettingsSettingsQueueResult'] = None, retry_on_timeout: Optional[bool] = None, run_as: Optional['outputs.GetJobJobSettingsSettingsRunAsResult'] = None, + run_job_task: Optional['outputs.GetJobJobSettingsSettingsRunJobTaskResult'] = None, schedule: Optional['outputs.GetJobJobSettingsSettingsScheduleResult'] = None, spark_jar_task: Optional['outputs.GetJobJobSettingsSettingsSparkJarTaskResult'] = None, spark_python_task: Optional['outputs.GetJobJobSettingsSettingsSparkPythonTaskResult'] = None, @@ -14444,6 +14694,8 @@ def __init__(__self__, *, pulumi.set(__self__, "notebook_task", notebook_task) if notification_settings is not None: pulumi.set(__self__, "notification_settings", notification_settings) + if parameters is not None: + pulumi.set(__self__, 
"parameters", parameters) if pipeline_task is not None: pulumi.set(__self__, "pipeline_task", pipeline_task) if python_wheel_task is not None: @@ -14454,6 +14706,8 @@ def __init__(__self__, *, pulumi.set(__self__, "retry_on_timeout", retry_on_timeout) if run_as is not None: pulumi.set(__self__, "run_as", run_as) + if run_job_task is not None: + pulumi.set(__self__, "run_job_task", run_job_task) if schedule is not None: pulumi.set(__self__, "schedule", schedule) if spark_jar_task is not None: @@ -14561,6 +14815,11 @@ def notebook_task(self) -> Optional['outputs.GetJobJobSettingsSettingsNotebookTa def notification_settings(self) -> Optional['outputs.GetJobJobSettingsSettingsNotificationSettingsResult']: return pulumi.get(self, "notification_settings") + @property + @pulumi.getter + def parameters(self) -> Optional[Sequence['outputs.GetJobJobSettingsSettingsParameterResult']]: + return pulumi.get(self, "parameters") + @property @pulumi.getter(name="pipelineTask") def pipeline_task(self) -> Optional['outputs.GetJobJobSettingsSettingsPipelineTaskResult']: @@ -14586,6 +14845,11 @@ def retry_on_timeout(self) -> Optional[bool]: def run_as(self) -> Optional['outputs.GetJobJobSettingsSettingsRunAsResult']: return pulumi.get(self, "run_as") + @property + @pulumi.getter(name="runJobTask") + def run_job_task(self) -> Optional['outputs.GetJobJobSettingsSettingsRunJobTaskResult']: + return pulumi.get(self, "run_job_task") + @property @pulumi.getter def schedule(self) -> Optional['outputs.GetJobJobSettingsSettingsScheduleResult']: @@ -15466,6 +15730,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -15474,6 +15739,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -15494,6 +15761,11 @@ def boot_disk_size(self) -> Optional[int]: def google_service_account(self) -> Optional[str]: return pulumi.get(self, "google_service_account") + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: @@ -16378,6 +16650,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -16386,6 +16659,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -16406,6 +16681,11 @@ def 
boot_disk_size(self) -> Optional[int]: def google_service_account(self) -> Optional[str]: return pulumi.get(self, "google_service_account") + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: @@ -16676,6 +16956,33 @@ def no_alert_for_skipped_runs(self) -> Optional[bool]: return pulumi.get(self, "no_alert_for_skipped_runs") +@pulumi.output_type +class GetJobJobSettingsSettingsParameterResult(dict): + def __init__(__self__, *, + default: Optional[str] = None, + name: Optional[str] = None): + """ + :param str name: the job name of Job if the resource was matched by id. + """ + if default is not None: + pulumi.set(__self__, "default", default) + if name is not None: + pulumi.set(__self__, "name", name) + + @property + @pulumi.getter + def default(self) -> Optional[str]: + return pulumi.get(self, "default") + + @property + @pulumi.getter + def name(self) -> Optional[str]: + """ + the job name of Job if the resource was matched by id. + """ + return pulumi.get(self, "name") + + @pulumi.output_type class GetJobJobSettingsSettingsPipelineTaskResult(dict): def __init__(__self__, *, @@ -16760,6 +17067,26 @@ def user_name(self) -> Optional[str]: return pulumi.get(self, "user_name") +@pulumi.output_type +class GetJobJobSettingsSettingsRunJobTaskResult(dict): + def __init__(__self__, *, + job_id: str, + job_parameters: Optional[Mapping[str, Any]] = None): + pulumi.set(__self__, "job_id", job_id) + if job_parameters is not None: + pulumi.set(__self__, "job_parameters", job_parameters) + + @property + @pulumi.getter(name="jobId") + def job_id(self) -> str: + return pulumi.get(self, "job_id") + + @property + @pulumi.getter(name="jobParameters") + def job_parameters(self) -> Optional[Mapping[str, Any]]: + return pulumi.get(self, "job_parameters") + + @pulumi.output_type class GetJobJobSettingsSettingsScheduleResult(dict): def __init__(__self__, *, @@ -16878,6 +17205,7 @@ def __init__(__self__, *, pipeline_task: Optional['outputs.GetJobJobSettingsSettingsTaskPipelineTaskResult'] = None, python_wheel_task: Optional['outputs.GetJobJobSettingsSettingsTaskPythonWheelTaskResult'] = None, run_if: Optional[str] = None, + run_job_task: Optional['outputs.GetJobJobSettingsSettingsTaskRunJobTaskResult'] = None, spark_jar_task: Optional['outputs.GetJobJobSettingsSettingsTaskSparkJarTaskResult'] = None, spark_python_task: Optional['outputs.GetJobJobSettingsSettingsTaskSparkPythonTaskResult'] = None, spark_submit_task: Optional['outputs.GetJobJobSettingsSettingsTaskSparkSubmitTaskResult'] = None, @@ -16921,6 +17249,8 @@ def __init__(__self__, *, pulumi.set(__self__, "python_wheel_task", python_wheel_task) if run_if is not None: pulumi.set(__self__, "run_if", run_if) + if run_job_task is not None: + pulumi.set(__self__, "run_job_task", run_job_task) if spark_jar_task is not None: pulumi.set(__self__, "spark_jar_task", spark_jar_task) if spark_python_task is not None: @@ -17029,6 +17359,11 @@ def python_wheel_task(self) -> Optional['outputs.GetJobJobSettingsSettingsTaskPy def run_if(self) -> Optional[str]: return pulumi.get(self, "run_if") + @property + @pulumi.getter(name="runJobTask") + def run_job_task(self) -> Optional['outputs.GetJobJobSettingsSettingsTaskRunJobTaskResult']: + return pulumi.get(self, "run_job_task") + @property @pulumi.getter(name="sparkJarTask") def spark_jar_task(self) -> 
Optional['outputs.GetJobJobSettingsSettingsTaskSparkJarTaskResult']: @@ -17918,6 +18253,7 @@ def __init__(__self__, *, availability: Optional[str] = None, boot_disk_size: Optional[int] = None, google_service_account: Optional[str] = None, + local_ssd_count: Optional[int] = None, use_preemptible_executors: Optional[bool] = None, zone_id: Optional[str] = None): if availability is not None: @@ -17926,6 +18262,8 @@ def __init__(__self__, *, pulumi.set(__self__, "boot_disk_size", boot_disk_size) if google_service_account is not None: pulumi.set(__self__, "google_service_account", google_service_account) + if local_ssd_count is not None: + pulumi.set(__self__, "local_ssd_count", local_ssd_count) if use_preemptible_executors is not None: pulumi.set(__self__, "use_preemptible_executors", use_preemptible_executors) if zone_id is not None: @@ -17946,6 +18284,11 @@ def boot_disk_size(self) -> Optional[int]: def google_service_account(self) -> Optional[str]: return pulumi.get(self, "google_service_account") + @property + @pulumi.getter(name="localSsdCount") + def local_ssd_count(self) -> Optional[int]: + return pulumi.get(self, "local_ssd_count") + @property @pulumi.getter(name="usePreemptibleExecutors") def use_preemptible_executors(self) -> Optional[bool]: @@ -18281,6 +18624,26 @@ def parameters(self) -> Optional[Sequence[str]]: return pulumi.get(self, "parameters") +@pulumi.output_type +class GetJobJobSettingsSettingsTaskRunJobTaskResult(dict): + def __init__(__self__, *, + job_id: str, + job_parameters: Optional[Mapping[str, Any]] = None): + pulumi.set(__self__, "job_id", job_id) + if job_parameters is not None: + pulumi.set(__self__, "job_parameters", job_parameters) + + @property + @pulumi.getter(name="jobId") + def job_id(self) -> str: + return pulumi.get(self, "job_id") + + @property + @pulumi.getter(name="jobParameters") + def job_parameters(self) -> Optional[Mapping[str, Any]]: + return pulumi.get(self, "job_parameters") + + @pulumi.output_type class GetJobJobSettingsSettingsTaskSparkJarTaskResult(dict): def __init__(__self__, *,
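The data-source result types above expose the same new fields when a job is read back. A minimal sketch (not part of this diff), assuming `databricks.get_job` and the attribute paths implied by the `GetJobJobSettingsSettings*Result` classes; the job name is a placeholder:

```python
import pulumi
import pulumi_databricks as databricks

# Sketch only: reads the new `parameters` and `run_job_task` fields back out.
job = databricks.get_job(job_name="orchestrator")
settings = job.job_settings.settings

pulumi.export("parameter_names", [p.name for p in (settings.parameters or [])])
pulumi.export("downstream_job_ids",
              [t.run_job_task.job_id for t in (settings.tasks or []) if t.run_job_task])
```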