From 7e32dca4a6adbb6f75e9338f168f7c9d098e271f Mon Sep 17 00:00:00 2001 From: dpebot Date: Tue, 6 Nov 2018 05:14:53 -0800 Subject: [PATCH] [CHANGE ME] Re-generated dataproc to pick up changes in the API or client library generator. --- .../gapic/cluster_controller_client.py | 261 ++++++++---- .../google/cloud/dataproc_v1/gapic/enums.py | 12 +- .../gapic/job_controller_client.py | 90 +++-- .../cluster_controller_grpc_transport.py | 5 +- .../job_controller_grpc_transport.py | 13 +- .../gapic/cluster_controller_client.py | 381 +++++++++++++----- .../cloud/dataproc_v1beta2/gapic/enums.py | 12 +- .../gapic/job_controller_client.py | 103 ++--- .../cluster_controller_grpc_transport.py | 5 +- .../job_controller_grpc_transport.py | 13 +- ...orkflow_template_service_grpc_transport.py | 27 +- .../gapic/workflow_template_service_client.py | 68 ++-- ...workflow_template_service_client_config.py | 1 - 13 files changed, 657 insertions(+), 334 deletions(-) diff --git a/dataproc/google/cloud/dataproc_v1/gapic/cluster_controller_client.py b/dataproc/google/cloud/dataproc_v1/gapic/cluster_controller_client.py index 54cfef92569f..ac07b5c21294 100644 --- a/dataproc/google/cloud/dataproc_v1/gapic/cluster_controller_client.py +++ b/dataproc/google/cloud/dataproc_v1/gapic/cluster_controller_client.py @@ -146,9 +146,10 @@ def __init__(self, ) if client_info is None: - client_info = ( - google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) - client_info.gapic_version = _GAPIC_LIBRARY_VERSION + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC @@ -180,13 +181,13 @@ def create_cluster(self, >>> >>> client = dataproc_v1.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``cluster``: + >>> # TODO: Initialize `cluster`: >>> cluster = {} >>> >>> response = client.create_cluster(project_id, region, cluster) @@ -205,6 +206,7 @@ def create_cluster(self, belongs to. region (str): Required. The Cloud Dataproc region in which to handle the request. cluster (Union[dict, ~google.cloud.dataproc_v1.types.Cluster]): Required. The cluster to create. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1.types.Cluster` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -268,19 +270,19 @@ def update_cluster(self, >>> >>> client = dataproc_v1.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``cluster_name``: + >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> - >>> # TODO: Initialize ``cluster``: + >>> # TODO: Initialize `cluster`: >>> cluster = {} >>> - >>> # TODO: Initialize ``update_mask``: + >>> # TODO: Initialize `update_mask`: >>> update_mask = {} >>> >>> response = client.update_cluster(project_id, region, cluster_name, cluster, update_mask) @@ -300,51 +302,172 @@ def update_cluster(self, region (str): Required. The Cloud Dataproc region in which to handle the request. cluster_name (str): Required. The cluster name. 
cluster (Union[dict, ~google.cloud.dataproc_v1.types.Cluster]): Required. The changes to the cluster.
+
 If a dict is provided, it must be of the same form as the protobuf
 message :class:`~google.cloud.dataproc_v1.types.Cluster`
- update_mask (Union[dict, ~google.cloud.dataproc_v1.types.FieldMask]): Required. Specifies the path, relative to ``Cluster``, of
- the field to update. For example, to change the number of workers
- in a cluster to 5, the ``update_mask`` parameter would be
- specified as ``config.worker_config.num_instances``,
- and the ``PATCH`` request body would specify the new value, as follows:
+ update_mask (Union[dict, ~google.cloud.dataproc_v1.types.FieldMask]): Required. Specifies the path, relative to ``Cluster``, of the field to
+ update. For example, to change the number of workers in a cluster to 5,
+ the ``update_mask`` parameter would be specified as
+ ``config.worker_config.num_instances``, and the ``PATCH`` request body
+ would specify the new value, as follows:

 ::

- {
- \"config\":{
- \"workerConfig\":{
- \"numInstances\":\"5\"
- }
- }
- }
+ {
+ "config":{
+ "workerConfig":{
+ "numInstances":"5"
+ }
+ }
+ }

- Similarly, to change the number of preemptible workers in a cluster to 5,
- the ``update_mask`` parameter would be
- ``config.secondary_worker_config.num_instances``, and the ``PATCH`` request
- body would be set as follows:
+ Similarly, to change the number of preemptible workers in a cluster to
+ 5, the ``update_mask`` parameter would be
+ ``config.secondary_worker_config.num_instances``, and the ``PATCH``
+ request body would be set as follows:

 ::

- {
- \"config\":{
- \"secondaryWorkerConfig\":{
- \"numInstances\":\"5\"
- }
- }
- }
+ {
+ "config":{
+ "secondaryWorkerConfig":{
+ "numInstances":"5"
+ }
+ }
+ }
-
- .. note::
-
- Currently, only the following fields can be updated:
-
- * ``labels``: Update labels
- * ``config.worker_config.num_instances``: Resize primary
- worker group
- * ``config.secondary_worker_config.num_instances``: Resize
- secondary worker group
-
- If a dict is provided, it must be of the same form as the protobuf
- message :class:`~google.cloud.dataproc_v1.types.FieldMask`
+
+ Note: Currently, only the following fields can be updated:
+
+ =================================================  =============================
+ Mask                                               Purpose
+ =================================================  =============================
+ labels                                             Update labels
+ config.worker\_config.num\_instances               Resize primary worker group
+ config.secondary\_worker\_config.num\_instances    Resize secondary worker group
+ =================================================  =============================
+ + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.dataproc_v1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -406,13 +529,13 @@ def delete_cluster(self, >>> >>> client = dataproc_v1.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``cluster_name``: + >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> >>> response = client.delete_cluster(project_id, region, cluster_name) @@ -490,13 +613,13 @@ def get_cluster(self, >>> >>> client = dataproc_v1.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``cluster_name``: + >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> >>> response = client.get_cluster(project_id, region, cluster_name) @@ -559,10 +682,10 @@ def list_clusters(self, >>> >>> client = dataproc_v1.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> >>> # Iterate over all results @@ -574,7 +697,7 @@ def list_clusters(self, >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_clusters(project_id, region, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_clusters(project_id, region).pages: ... for element in page: ... # process element ... pass @@ -588,20 +711,21 @@ def list_clusters(self, field = value [AND [field = value]] ... - where **field** is one of ``status.state``, ``clusterName``, or ``labels.[KEY]``, - and ``[KEY]`` is a label key. **value** can be ``*`` to match all values. - ``status.state`` can be one of the following: ``ACTIVE``, ``INACTIVE``, - ``CREATING``, ``RUNNING``, ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` - contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` - contains the ``DELETING`` and ``ERROR`` states. - ``clusterName`` is the name of the cluster provided at creation time. - Only the logical ``AND`` operator is supported; space-separated items are - treated as having an implicit ``AND`` operator. + where **field** is one of ``status.state``, ``clusterName``, or + ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** can be ``*`` + to match all values. ``status.state`` can be one of the following: + ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, ``ERROR``, + ``DELETING``, or ``UPDATING``. ``ACTIVE`` contains the ``CREATING``, + ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains the + ``DELETING`` and ``ERROR`` states. ``clusterName`` is the name of the + cluster provided at creation time. Only the logical ``AND`` operator is + supported; space-separated items are treated as having an implicit + ``AND`` operator. 
Example filter: - status.state = ACTIVE AND clusterName = mycluster - AND labels.env = staging AND labels.starred = * + status.state = ACTIVE AND clusterName = mycluster AND labels.env = + staging AND labels.starred = \* page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -668,22 +792,21 @@ def diagnose_cluster(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Gets cluster diagnostic information. - After the operation completes, the Operation.response field - contains ``DiagnoseClusterOutputLocation``. + Gets cluster diagnostic information. After the operation completes, the + Operation.response field contains ``DiagnoseClusterOutputLocation``. Example: >>> from google.cloud import dataproc_v1 >>> >>> client = dataproc_v1.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``cluster_name``: + >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> >>> response = client.diagnose_cluster(project_id, region, cluster_name) diff --git a/dataproc/google/cloud/dataproc_v1/gapic/enums.py b/dataproc/google/cloud/dataproc_v1/gapic/enums.py index 0bf510f5d462..4cfcd1d34905 100644 --- a/dataproc/google/cloud/dataproc_v1/gapic/enums.py +++ b/dataproc/google/cloud/dataproc_v1/gapic/enums.py @@ -77,9 +77,9 @@ class Substate(enum.IntEnum): class LoggingConfig(object): class Level(enum.IntEnum): """ - The Log4j level for job execution. When running an - `Apache Hive `_ job, Cloud - Dataproc configures the Hive client to an equivalent verbosity level. + The Log4j level for job execution. When running an `Apache + Hive `__ job, Cloud Dataproc configures the + Hive client to an equivalent verbosity level. Attributes: LEVEL_UNSPECIFIED (int): Level is unspecified. Use default level for log4j. @@ -144,7 +144,7 @@ class Substate(enum.IntEnum): Applies to RUNNING state. QUEUED (int): The Job has been received and is awaiting execution (it may be waiting - for a condition to be met). See the \"details\" field for the reason for + for a condition to be met). See the "details" field for the reason for the delay. Applies to RUNNING state. @@ -169,7 +169,7 @@ class State(enum.IntEnum): Attributes: STATE_UNSPECIFIED (int): Status is unspecified. NEW (int): Status is NEW. - NEW_SAVING (int): Status is NEW_SAVING. + NEW_SAVING (int): Status is NEW\_SAVING. SUBMITTED (int): Status is SUBMITTED. ACCEPTED (int): Status is ACCEPTED. RUNNING (int): Status is RUNNING. @@ -196,7 +196,7 @@ class JobStateMatcher(enum.IntEnum): Attributes: ALL (int): Match all jobs, regardless of state. ACTIVE (int): Only match jobs in non-terminal states: PENDING, RUNNING, or - CANCEL_PENDING. + CANCEL\_PENDING. NON_ACTIVE (int): Only match jobs in terminal states: CANCELLED, DONE, or ERROR. 
""" ALL = 0 diff --git a/dataproc/google/cloud/dataproc_v1/gapic/job_controller_client.py b/dataproc/google/cloud/dataproc_v1/gapic/job_controller_client.py index 74363e17c852..728c29cf1e9b 100644 --- a/dataproc/google/cloud/dataproc_v1/gapic/job_controller_client.py +++ b/dataproc/google/cloud/dataproc_v1/gapic/job_controller_client.py @@ -143,9 +143,10 @@ def __init__(self, ) if client_info is None: - client_info = ( - google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) - client_info.gapic_version = _GAPIC_LIBRARY_VERSION + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC @@ -177,13 +178,13 @@ def submit_job(self, >>> >>> client = dataproc_v1.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``job``: + >>> # TODO: Initialize `job`: >>> job = {} >>> >>> response = client.submit_job(project_id, region, job) @@ -193,6 +194,7 @@ def submit_job(self, belongs to. region (str): Required. The Cloud Dataproc region in which to handle the request. job (Union[dict, ~google.cloud.dataproc_v1.types.Job]): Required. The job resource. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1.types.Job` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -247,13 +249,13 @@ def get_job(self, >>> >>> client = dataproc_v1.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``job_id``: + >>> # TODO: Initialize `job_id`: >>> job_id = '' >>> >>> response = client.get_job(project_id, region, job_id) @@ -318,10 +320,10 @@ def list_jobs(self, >>> >>> client = dataproc_v1.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> >>> # Iterate over all results @@ -333,7 +335,7 @@ def list_jobs(self, >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_jobs(project_id, region, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_jobs(project_id, region).pages: ... for element in page: ... # process element ... pass @@ -349,8 +351,8 @@ def list_jobs(self, of resources in a page. cluster_name (str): Optional. If set, the returned jobs list includes only jobs that were submitted to the named cluster. - job_state_matcher (~google.cloud.dataproc_v1.types.JobStateMatcher): Optional. Specifies enumerated categories of jobs to list. - (default = match ALL jobs). + job_state_matcher (~google.cloud.dataproc_v1.types.JobStateMatcher): Optional. Specifies enumerated categories of jobs to list. (default = + match ALL jobs). If ``filter`` is provided, ``jobStateMatcher`` will be ignored. filter_ (str): Optional. A filter constraining the jobs to list. Filters are @@ -358,15 +360,15 @@ def list_jobs(self, [field = value] AND [field [= value]] ... 
- where **field** is ``status.state`` or ``labels.[KEY]``, and ``[KEY]`` is a label - key. **value** can be ``*`` to match all values. - ``status.state`` can be either ``ACTIVE`` or ``NON_ACTIVE``. - Only the logical ``AND`` operator is supported; space-separated items are - treated as having an implicit ``AND`` operator. + where **field** is ``status.state`` or ``labels.[KEY]``, and ``[KEY]`` + is a label key. **value** can be ``*`` to match all values. + ``status.state`` can be either ``ACTIVE`` or ``NON_ACTIVE``. Only the + logical ``AND`` operator is supported; space-separated items are treated + as having an implicit ``AND`` operator. Example filter: - status.state = ACTIVE AND labels.env = staging AND labels.starred = * + status.state = ACTIVE AND labels.env = staging AND labels.starred = \* retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -438,19 +440,19 @@ def update_job(self, >>> >>> client = dataproc_v1.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``job_id``: + >>> # TODO: Initialize `job_id`: >>> job_id = '' >>> - >>> # TODO: Initialize ``job``: + >>> # TODO: Initialize `job`: >>> job = {} >>> - >>> # TODO: Initialize ``update_mask``: + >>> # TODO: Initialize `update_mask`: >>> update_mask = {} >>> >>> response = client.update_job(project_id, region, job_id, job, update_mask) @@ -461,14 +463,15 @@ def update_job(self, region (str): Required. The Cloud Dataproc region in which to handle the request. job_id (str): Required. The job ID. job (Union[dict, ~google.cloud.dataproc_v1.types.Job]): Required. The changes to the job. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1.types.Job` - update_mask (Union[dict, ~google.cloud.dataproc_v1.types.FieldMask]): Required. Specifies the path, relative to Job, of - the field to update. For example, to update the labels of a Job the - update_mask parameter would be specified as - labels, and the ``PATCH`` request body would specify the new - value. Note: Currently, labels is the only - field that can be updated. + update_mask (Union[dict, ~google.cloud.dataproc_v1.types.FieldMask]): Required. Specifies the path, relative to Job, of the field to update. + For example, to update the labels of a Job the update\_mask parameter + would be specified as labels, and the ``PATCH`` request body would + specify the new value. Note: Currently, labels is the only field that + can be updated. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -518,23 +521,24 @@ def cancel_job(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `_ or - `regions/{region}/jobs.get `_. + Starts a job cancellation request. To access the job resource after + cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. 
Example: >>> from google.cloud import dataproc_v1 >>> >>> client = dataproc_v1.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``job_id``: + >>> # TODO: Initialize `job_id`: >>> job_id = '' >>> >>> response = client.cancel_job(project_id, region, job_id) @@ -589,21 +593,21 @@ def delete_job(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Deletes the job from the project. If the job is active, the delete fails, - and the response returns ``FAILED_PRECONDITION``. + Deletes the job from the project. If the job is active, the delete + fails, and the response returns ``FAILED_PRECONDITION``. Example: >>> from google.cloud import dataproc_v1 >>> >>> client = dataproc_v1.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``job_id``: + >>> # TODO: Initialize `job_id`: >>> job_id = '' >>> >>> client.delete_job(project_id, region, job_id) diff --git a/dataproc/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py b/dataproc/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py index db7724ea80b9..7cdd05a987a8 100644 --- a/dataproc/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py +++ b/dataproc/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py @@ -168,9 +168,8 @@ def list_clusters(self): def diagnose_cluster(self): """Return the gRPC stub for {$apiMethod.name}. - Gets cluster diagnostic information. - After the operation completes, the Operation.response field - contains ``DiagnoseClusterOutputLocation``. + Gets cluster diagnostic information. After the operation completes, the + Operation.response field contains ``DiagnoseClusterOutputLocation``. Returns: Callable: A callable which accepts the appropriate diff --git a/dataproc/google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py b/dataproc/google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py index 5bef214f950a..4985993d21e0 100644 --- a/dataproc/google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py +++ b/dataproc/google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py @@ -147,10 +147,11 @@ def update_job(self): def cancel_job(self): """Return the gRPC stub for {$apiMethod.name}. - Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `_ or - `regions/{region}/jobs.get `_. + Starts a job cancellation request. To access the job resource after + cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. Returns: Callable: A callable which accepts the appropriate @@ -163,8 +164,8 @@ def cancel_job(self): def delete_job(self): """Return the gRPC stub for {$apiMethod.name}. - Deletes the job from the project. If the job is active, the delete fails, - and the response returns ``FAILED_PRECONDITION``. + Deletes the job from the project. If the job is active, the delete + fails, and the response returns ``FAILED_PRECONDITION``. 
Returns: Callable: A callable which accepts the appropriate diff --git a/dataproc/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py b/dataproc/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py index 6db0d50f9b74..2b51643aa46e 100644 --- a/dataproc/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py +++ b/dataproc/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py @@ -147,9 +147,10 @@ def __init__(self, ) if client_info is None: - client_info = ( - google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) - client_info.gapic_version = _GAPIC_LIBRARY_VERSION + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC @@ -182,13 +183,13 @@ def create_cluster(self, >>> >>> client = dataproc_v1beta2.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``cluster``: + >>> # TODO: Initialize `cluster`: >>> cluster = {} >>> >>> response = client.create_cluster(project_id, region, cluster) @@ -207,19 +208,20 @@ def create_cluster(self, belongs to. region (str): Required. The Cloud Dataproc region in which to handle the request. cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The cluster to create. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1beta2.types.Cluster` request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``CreateClusterRequest`` requests with the same - id, then the second request will be ignored and the - first ``google.longrunning.Operation`` created and stored in the backend - is returned. + receives two ``CreateClusterRequest`` requests with the same id, then + the second request will be ignored and the first + ``google.longrunning.Operation`` created and stored in the backend is + returned. It is recommended to always set this value to a - `UUID `_. + `UUID `__. - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 characters. + The id must contain only letters (a-z, A-Z), numbers (0-9), underscores + (\_), and hyphens (-). The maximum length is 40 characters. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -284,19 +286,19 @@ def update_cluster(self, >>> >>> client = dataproc_v1beta2.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``cluster_name``: + >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> - >>> # TODO: Initialize ``cluster``: + >>> # TODO: Initialize `cluster`: >>> cluster = {} >>> - >>> # TODO: Initialize ``update_mask``: + >>> # TODO: Initialize `update_mask`: >>> update_mask = {} >>> >>> response = client.update_cluster(project_id, region, cluster_name, cluster, update_mask) @@ -316,50 +318,248 @@ def update_cluster(self, region (str): Required. 
The Cloud Dataproc region in which to handle the request.
 cluster_name (str): Required. The cluster name.
 cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The changes to the cluster.
+
 If a dict is provided, it must be of the same form as the protobuf
 message :class:`~google.cloud.dataproc_v1beta2.types.Cluster`
- update_mask (Union[dict, ~google.cloud.dataproc_v1beta2.types.FieldMask]): Required. Specifies the path, relative to ``Cluster``, of
- the field to update. For example, to change the number of workers
- in a cluster to 5, the ``update_mask`` parameter would be
- specified as ``config.worker_config.num_instances``,
- and the ``PATCH`` request body would specify the new value, as follows:
+ update_mask (Union[dict, ~google.cloud.dataproc_v1beta2.types.FieldMask]): Required. Specifies the path, relative to ``Cluster``, of the field to
+ update. For example, to change the number of workers in a cluster to 5,
+ the ``update_mask`` parameter would be specified as
+ ``config.worker_config.num_instances``, and the ``PATCH`` request body
+ would specify the new value, as follows:

 ::

- {
- \"config\":{
- \"workerConfig\":{
- \"numInstances\":\"5\"
- }
- }
- }
+ {
+ "config":{
+ "workerConfig":{
+ "numInstances":"5"
+ }
+ }
+ }

- Similarly, to change the number of preemptible workers in a cluster to 5, the
- ``update_mask`` parameter would be ``config.secondary_worker_config.num_instances``,
- and the ``PATCH`` request body would be set as follows:
+ Similarly, to change the number of preemptible workers in a cluster to
+ 5, the ``update_mask`` parameter would be
+ ``config.secondary_worker_config.num_instances``, and the ``PATCH``
+ request body would be set as follows:

 ::

- {
- \"config\":{
- \"secondaryWorkerConfig\":{
- \"numInstances\":\"5\"
- }
- }
- }
+ {
+ "config":{
+ "secondaryWorkerConfig":{
+ "numInstances":"5"
+ }
+ }
+ }
-
- .. note::
-
- Currently, only the following fields can be updated:
-
- * ``labels``: Update labels
- * ``config.worker_config.num_instances``: Resize primary
- worker group
- * ``config.secondary_worker_config.num_instances``: Resize
- secondary worker group
-
- If a dict is provided, it must be of the same form as the protobuf
- message :class:`~google.cloud.dataproc_v1beta2.types.FieldMask`
+
+ Note: currently only the following fields can be updated:
+
+ =================================================  ===================================
+ Mask                                               Purpose
+ =================================================  ===================================
+ labels                                             Updates labels
+ config.worker\_config.num\_instances               Resize primary worker group
+ config.secondary\_worker\_config.num\_instances    Resize secondary worker group
+ config.lifecycle\_config.auto\_delete\_ttl         Reset MAX TTL duration
+ config.lifecycle\_config.auto\_delete\_time        Update MAX TTL deletion timestamp
+ config.lifecycle\_config.idle\_delete\_ttl         Update Idle TTL duration
+ =================================================  ===================================
+ + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.dataproc_v1beta2.types.FieldMask` graceful_decommission_timeout (Union[dict, ~google.cloud.dataproc_v1beta2.types.Duration]): Optional. Timeout for graceful YARN decomissioning. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how long to wait for jobs @@ -368,19 +568,20 @@ def update_cluster(self, the maximum allowed timeout is 1 day. Only supported on Dataproc image versions 1.2 and higher. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1beta2.types.Duration` request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``UpdateClusterRequest`` requests with the same - id, then the second request will be ignored and the - first ``google.longrunning.Operation`` created and stored in the - backend is returned. + receives two ``UpdateClusterRequest`` requests with the same id, then + the second request will be ignored and the first + ``google.longrunning.Operation`` created and stored in the backend is + returned. It is recommended to always set this value to a - `UUID `_. + `UUID `__. - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 characters. + The id must contain only letters (a-z, A-Z), numbers (0-9), underscores + (\_), and hyphens (-). The maximum length is 40 characters. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -446,13 +647,13 @@ def delete_cluster(self, >>> >>> client = dataproc_v1beta2.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``cluster_name``: + >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> >>> response = client.delete_cluster(project_id, region, cluster_name) @@ -472,18 +673,18 @@ def delete_cluster(self, region (str): Required. The Cloud Dataproc region in which to handle the request. cluster_name (str): Required. The cluster name. cluster_uuid (str): Optional. Specifying the ``cluster_uuid`` means the RPC should fail - (with error NOT_FOUND) if cluster with specified UUID does not exist. + (with error NOT\_FOUND) if cluster with specified UUID does not exist. request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``DeleteClusterRequest`` requests with the same - id, then the second request will be ignored and the - first ``google.longrunning.Operation`` created and stored in the - backend is returned. + receives two ``DeleteClusterRequest`` requests with the same id, then + the second request will be ignored and the first + ``google.longrunning.Operation`` created and stored in the backend is + returned. It is recommended to always set this value to a - `UUID `_. + `UUID `__. - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 characters. + The id must contain only letters (a-z, A-Z), numbers (0-9), underscores + (\_), and hyphens (-). The maximum length is 40 characters. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. 
If ``None`` is specified, requests will not be retried. @@ -545,13 +746,13 @@ def get_cluster(self, >>> >>> client = dataproc_v1beta2.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``cluster_name``: + >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> >>> response = client.get_cluster(project_id, region, cluster_name) @@ -614,10 +815,10 @@ def list_clusters(self, >>> >>> client = dataproc_v1beta2.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> >>> # Iterate over all results @@ -629,7 +830,7 @@ def list_clusters(self, >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_clusters(project_id, region, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_clusters(project_id, region).pages: ... for element in page: ... # process element ... pass @@ -643,20 +844,21 @@ def list_clusters(self, field = value [AND [field = value]] ... - where **field** is one of ``status.state``, ``clusterName``, or ``labels.[KEY]``, - and ``[KEY]`` is a label key. **value** can be ``*`` to match all values. - ``status.state`` can be one of the following: ``ACTIVE``, ``INACTIVE``, - ``CREATING``, ``RUNNING``, ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` - contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` - contains the ``DELETING`` and ``ERROR`` states. - ``clusterName`` is the name of the cluster provided at creation time. - Only the logical ``AND`` operator is supported; space-separated items are - treated as having an implicit ``AND`` operator. + where **field** is one of ``status.state``, ``clusterName``, or + ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** can be ``*`` + to match all values. ``status.state`` can be one of the following: + ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, ``ERROR``, + ``DELETING``, or ``UPDATING``. ``ACTIVE`` contains the ``CREATING``, + ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains the + ``DELETING`` and ``ERROR`` states. ``clusterName`` is the name of the + cluster provided at creation time. Only the logical ``AND`` operator is + supported; space-separated items are treated as having an implicit + ``AND`` operator. Example filter: - status.state = ACTIVE AND clusterName = mycluster - AND labels.env = staging AND labels.starred = * + status.state = ACTIVE AND clusterName = mycluster AND labels.env = + staging AND labels.starred = \* page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -723,22 +925,21 @@ def diagnose_cluster(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Gets cluster diagnostic information. - After the operation completes, the Operation.response field - contains ``DiagnoseClusterOutputLocation``. + Gets cluster diagnostic information. After the operation completes, the + Operation.response field contains ``DiagnoseClusterOutputLocation``. 
Example: >>> from google.cloud import dataproc_v1beta2 >>> >>> client = dataproc_v1beta2.ClusterControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``cluster_name``: + >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> >>> response = client.diagnose_cluster(project_id, region, cluster_name) diff --git a/dataproc/google/cloud/dataproc_v1beta2/gapic/enums.py b/dataproc/google/cloud/dataproc_v1beta2/gapic/enums.py index e46211adfeb7..c1a4de2b21df 100644 --- a/dataproc/google/cloud/dataproc_v1beta2/gapic/enums.py +++ b/dataproc/google/cloud/dataproc_v1beta2/gapic/enums.py @@ -62,9 +62,9 @@ class Substate(enum.IntEnum): class LoggingConfig(object): class Level(enum.IntEnum): """ - The Log4j level for job execution. When running an - `Apache Hive `_ job, Cloud - Dataproc configures the Hive client to an equivalent verbosity level. + The Log4j level for job execution. When running an `Apache + Hive `__ job, Cloud Dataproc configures the + Hive client to an equivalent verbosity level. Attributes: LEVEL_UNSPECIFIED (int): Level is unspecified. Use default level for log4j. @@ -131,7 +131,7 @@ class Substate(enum.IntEnum): Applies to RUNNING state. QUEUED (int): The Job has been received and is awaiting execution (it may be waiting - for a condition to be met). See the \"details\" field for the reason for + for a condition to be met). See the "details" field for the reason for the delay. Applies to RUNNING state. @@ -156,7 +156,7 @@ class State(enum.IntEnum): Attributes: STATE_UNSPECIFIED (int): Status is unspecified. NEW (int): Status is NEW. - NEW_SAVING (int): Status is NEW_SAVING. + NEW_SAVING (int): Status is NEW\_SAVING. SUBMITTED (int): Status is SUBMITTED. ACCEPTED (int): Status is ACCEPTED. RUNNING (int): Status is RUNNING. @@ -183,7 +183,7 @@ class JobStateMatcher(enum.IntEnum): Attributes: ALL (int): Match all jobs, regardless of state. ACTIVE (int): Only match jobs in non-terminal states: PENDING, RUNNING, or - CANCEL_PENDING. + CANCEL\_PENDING. NON_ACTIVE (int): Only match jobs in terminal states: CANCELLED, DONE, or ERROR. 
""" ALL = 0 diff --git a/dataproc/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py b/dataproc/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py index a77f4aa52fa8..0e70dfe5e4bb 100644 --- a/dataproc/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py +++ b/dataproc/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py @@ -144,9 +144,10 @@ def __init__(self, ) if client_info is None: - client_info = ( - google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) - client_info.gapic_version = _GAPIC_LIBRARY_VERSION + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC @@ -179,13 +180,13 @@ def submit_job(self, >>> >>> client = dataproc_v1beta2.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``job``: + >>> # TODO: Initialize `job`: >>> job = {} >>> >>> response = client.submit_job(project_id, region, job) @@ -195,19 +196,19 @@ def submit_job(self, belongs to. region (str): Required. The Cloud Dataproc region in which to handle the request. job (Union[dict, ~google.cloud.dataproc_v1beta2.types.Job]): Required. The job resource. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1beta2.types.Job` request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``SubmitJobRequest`` requests with the same - id, then the second request will be ignored and the - first ``Job`` created and stored in the backend - is returned. + receives two ``SubmitJobRequest`` requests with the same id, then the + second request will be ignored and the first ``Job`` created and stored + in the backend is returned. It is recommended to always set this value to a - `UUID `_. + `UUID `__. - The id must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 characters. + The id must contain only letters (a-z, A-Z), numbers (0-9), underscores + (\_), and hyphens (-). The maximum length is 40 characters. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. 
@@ -261,13 +262,13 @@ def get_job(self, >>> >>> client = dataproc_v1beta2.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``job_id``: + >>> # TODO: Initialize `job_id`: >>> job_id = '' >>> >>> response = client.get_job(project_id, region, job_id) @@ -332,10 +333,10 @@ def list_jobs(self, >>> >>> client = dataproc_v1beta2.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> >>> # Iterate over all results @@ -347,7 +348,7 @@ def list_jobs(self, >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_jobs(project_id, region, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_jobs(project_id, region).pages: ... for element in page: ... # process element ... pass @@ -363,8 +364,8 @@ def list_jobs(self, of resources in a page. cluster_name (str): Optional. If set, the returned jobs list includes only jobs that were submitted to the named cluster. - job_state_matcher (~google.cloud.dataproc_v1beta2.types.JobStateMatcher): Optional. Specifies enumerated categories of jobs to list. - (default = match ALL jobs). + job_state_matcher (~google.cloud.dataproc_v1beta2.types.JobStateMatcher): Optional. Specifies enumerated categories of jobs to list. (default = + match ALL jobs). If ``filter`` is provided, ``jobStateMatcher`` will be ignored. filter_ (str): Optional. A filter constraining the jobs to list. Filters are @@ -372,15 +373,15 @@ def list_jobs(self, [field = value] AND [field [= value]] ... - where **field** is ``status.state`` or ``labels.[KEY]``, and ``[KEY]`` is a label - key. **value** can be ``*`` to match all values. - ``status.state`` can be either ``ACTIVE`` or ``NON_ACTIVE``. - Only the logical ``AND`` operator is supported; space-separated items are - treated as having an implicit ``AND`` operator. + where **field** is ``status.state`` or ``labels.[KEY]``, and ``[KEY]`` + is a label key. **value** can be ``*`` to match all values. + ``status.state`` can be either ``ACTIVE`` or ``NON_ACTIVE``. Only the + logical ``AND`` operator is supported; space-separated items are treated + as having an implicit ``AND`` operator. Example filter: - status.state = ACTIVE AND labels.env = staging AND labels.starred = * + status.state = ACTIVE AND labels.env = staging AND labels.starred = \* retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -452,19 +453,19 @@ def update_job(self, >>> >>> client = dataproc_v1beta2.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``job_id``: + >>> # TODO: Initialize `job_id`: >>> job_id = '' >>> - >>> # TODO: Initialize ``job``: + >>> # TODO: Initialize `job`: >>> job = {} >>> - >>> # TODO: Initialize ``update_mask``: + >>> # TODO: Initialize `update_mask`: >>> update_mask = {} >>> >>> response = client.update_job(project_id, region, job_id, job, update_mask) @@ -475,14 +476,15 @@ def update_job(self, region (str): Required. 
The Cloud Dataproc region in which to handle the request. job_id (str): Required. The job ID. job (Union[dict, ~google.cloud.dataproc_v1beta2.types.Job]): Required. The changes to the job. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1beta2.types.Job` - update_mask (Union[dict, ~google.cloud.dataproc_v1beta2.types.FieldMask]): Required. Specifies the path, relative to Job, of - the field to update. For example, to update the labels of a Job the - update_mask parameter would be specified as - labels, and the ``PATCH`` request body would specify the new - value. Note: Currently, labels is the only - field that can be updated. + update_mask (Union[dict, ~google.cloud.dataproc_v1beta2.types.FieldMask]): Required. Specifies the path, relative to Job, of the field to update. + For example, to update the labels of a Job the update\_mask parameter + would be specified as labels, and the ``PATCH`` request body would + specify the new value. Note: Currently, labels is the only field that + can be updated. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1beta2.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -532,23 +534,24 @@ def cancel_job(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `_ or - `regions/{region}/jobs.get `_. + Starts a job cancellation request. To access the job resource after + cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. Example: >>> from google.cloud import dataproc_v1beta2 >>> >>> client = dataproc_v1beta2.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``job_id``: + >>> # TODO: Initialize `job_id`: >>> job_id = '' >>> >>> response = client.cancel_job(project_id, region, job_id) @@ -603,21 +606,21 @@ def delete_job(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Deletes the job from the project. If the job is active, the delete fails, - and the response returns ``FAILED_PRECONDITION``. + Deletes the job from the project. If the job is active, the delete + fails, and the response returns ``FAILED_PRECONDITION``. Example: >>> from google.cloud import dataproc_v1beta2 >>> >>> client = dataproc_v1beta2.JobControllerClient() >>> - >>> # TODO: Initialize ``project_id``: + >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> - >>> # TODO: Initialize ``region``: + >>> # TODO: Initialize `region`: >>> region = '' >>> - >>> # TODO: Initialize ``job_id``: + >>> # TODO: Initialize `job_id`: >>> job_id = '' >>> >>> client.delete_job(project_id, region, job_id) diff --git a/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py b/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py index 5afd61a41c12..10a2d0e3c1f7 100644 --- a/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py +++ b/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py @@ -168,9 +168,8 @@ def list_clusters(self): def diagnose_cluster(self): """Return the gRPC stub for {$apiMethod.name}. 
- Gets cluster diagnostic information. - After the operation completes, the Operation.response field - contains ``DiagnoseClusterOutputLocation``. + Gets cluster diagnostic information. After the operation completes, the + Operation.response field contains ``DiagnoseClusterOutputLocation``. Returns: Callable: A callable which accepts the appropriate diff --git a/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py b/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py index 931f5d0cfc22..9d2ab28d3920 100644 --- a/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py +++ b/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py @@ -147,10 +147,11 @@ def update_job(self): def cancel_job(self): """Return the gRPC stub for {$apiMethod.name}. - Starts a job cancellation request. To access the job resource - after cancellation, call - `regions/{region}/jobs.list `_ or - `regions/{region}/jobs.get `_. + Starts a job cancellation request. To access the job resource after + cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. Returns: Callable: A callable which accepts the appropriate @@ -163,8 +164,8 @@ def cancel_job(self): def delete_job(self): """Return the gRPC stub for {$apiMethod.name}. - Deletes the job from the project. If the job is active, the delete fails, - and the response returns ``FAILED_PRECONDITION``. + Deletes the job from the project. If the job is active, the delete + fails, and the response returns ``FAILED_PRECONDITION``. Returns: Callable: A callable which accepts the appropriate diff --git a/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py b/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py index 9a742c02e336..e3da3a473cba 100644 --- a/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py +++ b/dataproc/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py @@ -136,22 +136,17 @@ def instantiate_workflow_template(self): Instantiates a template and begins execution. - The returned Operation can be used to track execution of - workflow by polling - ``operations.get``. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - ``operations.cancel``. - This will cause any inflight jobs to be cancelled and workflow-owned - clusters to be deleted. - - The ``Operation.metadata`` will be - ``WorkflowMetadata``. - - On successful completion, - ``Operation.response`` will be - ``Empty``. + The returned Operation can be used to track execution of workflow by + polling ``operations.get``. The Operation will complete when entire + workflow is finished. + + The running workflow can be aborted via ``operations.cancel``. This will + cause any inflight jobs to be cancelled and workflow-owned clusters to + be deleted. + + The ``Operation.metadata`` will be ``WorkflowMetadata``. + + On successful completion, ``Operation.response`` will be ``Empty``. 
Returns: Callable: A callable which accepts the appropriate diff --git a/dataproc/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py b/dataproc/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py index 4e87225b0030..4cb746a65464 100644 --- a/dataproc/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py +++ b/dataproc/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py @@ -171,9 +171,10 @@ def __init__(self, ) if client_info is None: - client_info = ( - google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) - client_info.gapic_version = _GAPIC_LIBRARY_VERSION + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC @@ -207,16 +208,17 @@ def create_workflow_template( >>> >>> parent = client.region_path('[PROJECT]', '[REGION]') >>> - >>> # TODO: Initialize ``template``: + >>> # TODO: Initialize `template`: >>> template = {} >>> >>> response = client.create_workflow_template(parent, template) Args: - parent (str): Required. The \"resource name\" of the region, as described - in https://cloud.google.com/apis/design/resource_names of the form + parent (str): Required. The "resource name" of the region, as described in + https://cloud.google.com/apis/design/resource\_names of the form ``projects/{project_id}/regions/{region}`` template (Union[dict, ~google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Required. The Dataproc workflow template to create. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -279,8 +281,8 @@ def get_workflow_template(self, >>> response = client.get_workflow_template(name) Args: - name (str): Required. The \"resource name\" of the workflow template, as described - in https://cloud.google.com/apis/design/resource_names of the form + name (str): Required. The "resource name" of the workflow template, as described in + https://cloud.google.com/apis/design/resource\_names of the form ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` version (int): Optional. The version of workflow template to retrieve. Only previously instatiated versions can be retrieved. @@ -335,22 +337,17 @@ def instantiate_workflow_template( """ Instantiates a template and begins execution. - The returned Operation can be used to track execution of - workflow by polling - ``operations.get``. - The Operation will complete when entire workflow is finished. + The returned Operation can be used to track execution of workflow by + polling ``operations.get``. The Operation will complete when entire + workflow is finished. - The running workflow can be aborted via - ``operations.cancel``. - This will cause any inflight jobs to be cancelled and workflow-owned - clusters to be deleted. + The running workflow can be aborted via ``operations.cancel``. This will + cause any inflight jobs to be cancelled and workflow-owned clusters to + be deleted. - The ``Operation.metadata`` will be - ``WorkflowMetadata``. + The ``Operation.metadata`` will be ``WorkflowMetadata``. - On successful completion, - ``Operation.response`` will be - ``Empty``. + On successful completion, ``Operation.response`` will be ``Empty``. 
Example: >>> from google.cloud import dataproc_v1beta2 @@ -371,8 +368,8 @@ def instantiate_workflow_template( >>> metadata = response.metadata() Args: - name (str): Required. The \"resource name\" of the workflow template, as described - in https://cloud.google.com/apis/design/resource_names of the form + name (str): Required. The "resource name" of the workflow template, as described in + https://cloud.google.com/apis/design/resource\_names of the form ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` version (int): Optional. The version of workflow template to instantiate. If specified, the workflow will be instantiated only if the current version of @@ -380,15 +377,15 @@ def instantiate_workflow_template( This option cannot be used to instantiate a previous version of workflow template. - instance_id (str): Optional. A tag that prevents multiple concurrent workflow - instances with the same tag from running. This mitigates risk of - concurrent instances started due to retries. + instance_id (str): Optional. A tag that prevents multiple concurrent workflow instances + with the same tag from running. This mitigates risk of concurrent + instances started due to retries. It is recommended to always set this value to a - `UUID `_. + `UUID `__. - The tag must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 characters. + The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores + (\_), and hyphens (-). The maximum length is 40 characters. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -449,7 +446,7 @@ def update_workflow_template( >>> >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() >>> - >>> # TODO: Initialize ``template``: + >>> # TODO: Initialize `template`: >>> template = {} >>> >>> response = client.update_workflow_template(template) @@ -458,6 +455,7 @@ def update_workflow_template( template (Union[dict, ~google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Required. The updated workflow template. The ``template.version`` field must match the current version. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -522,14 +520,14 @@ def list_workflow_templates( >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_workflow_templates(parent, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_workflow_templates(parent).pages: ... for element in page: ... # process element ... pass Args: - parent (str): Required. The \"resource name\" of the region, as described - in https://cloud.google.com/apis/design/resource_names of the form + parent (str): Required. The "resource name" of the region, as described in + https://cloud.google.com/apis/design/resource\_names of the form ``projects/{project_id}/regions/{region}`` page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- @@ -608,8 +606,8 @@ def delete_workflow_template( >>> client.delete_workflow_template(name) Args: - name (str): Required. The \"resource name\" of the workflow template, as described - in https://cloud.google.com/apis/design/resource_names of the form + name (str): Required. 
The "resource name" of the workflow template, as described in + https://cloud.google.com/apis/design/resource\_names of the form ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` version (int): Optional. The version of workflow template to delete. If specified, will only delete the template if the current server version matches diff --git a/dataproc/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py b/dataproc/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py index c16064698bdb..c6d882c2f269 100644 --- a/dataproc/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py +++ b/dataproc/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py @@ -3,7 +3,6 @@ "google.cloud.dataproc.v1beta2.WorkflowTemplateService": { "retry_codes": { "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "no_retry": [], "non_idempotent": [] }, "retry_params": {