diff --git a/bigquery/api/async_query.py b/bigquery/api/async_query.py
index 4df547159cd2..b454f65aebb2 100755
--- a/bigquery/api/async_query.py
+++ b/bigquery/api/async_query.py
@@ -33,12 +33,12 @@
 
 # [START async_query]
 def async_query(bigquery, project_id, query, batch=False, num_retries=5):
-    # Generate a unique job_id so retries
+    # Generate a unique job ID so retries
     # don't accidentally duplicate query
     job_data = {
         'jobReference': {
             'projectId': project_id,
-            'job_id': str(uuid.uuid4())
+            'jobId': str(uuid.uuid4())
         },
         'configuration': {
             'query': {
diff --git a/bigquery/api/export_data_to_cloud_storage.py b/bigquery/api/export_data_to_cloud_storage.py
index 2bbaced7abaa..6a738184dbe1 100755
--- a/bigquery/api/export_data_to_cloud_storage.py
+++ b/bigquery/api/export_data_to_cloud_storage.py
@@ -54,7 +54,7 @@ def export_table(bigquery, cloud_storage_path,
     Returns: an extract job resource representing the job, see
         https://cloud.google.com/bigquery/docs/reference/v2/jobs
     """
-    # Generate a unique job_id so retries
+    # Generate a unique job ID so retries
     # don't accidentally duplicate export
     job_data = {
         'jobReference': {
diff --git a/bigquery/api/load_data_from_csv.py b/bigquery/api/load_data_from_csv.py
index 4668be10b3af..7c17e08eec55 100755
--- a/bigquery/api/load_data_from_csv.py
+++ b/bigquery/api/load_data_from_csv.py
@@ -50,12 +50,12 @@ def load_table(bigquery, project_id, dataset_id, table_name, source_schema,
         https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
     """
 
-    # Generate a unique job_id so retries
+    # Generate a unique job ID so retries
     # don't accidentally duplicate query
    job_data = {
         'jobReference': {
             'projectId': project_id,
-            'job_id': str(uuid.uuid4())
+            'jobId': str(uuid.uuid4())
         },
         'configuration': {
             'load': {