Remove unused BigQuery append samples #6100

Merged: 1 commit, Sep 25, 2018

Changes from all commits
79 changes: 0 additions & 79 deletions bigquery/docs/snippets.py
@@ -1512,85 +1512,6 @@ def test_load_table_from_uri_autodetect(client, to_delete, capsys):
assert 'Loaded 50 rows.' in out


def test_load_table_from_uri_append(client, to_delete, capsys):
"""Appends data to a table from a GCS URI using various formats

Each file format has its own tested load from URI sample. Because most of
the code is common for autodetect, append, and truncate, this sample
includes snippets for all supported formats but only calls a single load
job.

This code snippet is made up of shared code, then format-specific code,
followed by more shared code. Note that only the last format in the
format-specific code section will be tested in this test.
"""
dataset_id = 'load_table_dataset_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)

job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
table_ref = dataset.table('us_states')
body = six.BytesIO(b'Washington,WA')
client.load_table_from_file(
body, table_ref, job_config=job_config).result()

    # Shared code
# [START bigquery_load_table_gcs_csv_append]
# [START bigquery_load_table_gcs_json_append]
# from google.cloud import bigquery
# client = bigquery.Client()
# table_ref = client.dataset('my_dataset').table('existing_table')

previous_rows = client.get_table(table_ref).num_rows
assert previous_rows > 0

job_config = bigquery.LoadJobConfig()
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
# [END bigquery_load_table_gcs_csv_append]
# [END bigquery_load_table_gcs_json_append]

# Format-specific code
# [START bigquery_load_table_gcs_csv_append]
job_config.skip_leading_rows = 1
# The source format defaults to CSV, so the line below is optional.
job_config.source_format = bigquery.SourceFormat.CSV
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.csv'
# [END bigquery_load_table_gcs_csv_append]
# unset csv-specific attribute
del job_config._properties['load']['skipLeadingRows']

# [START bigquery_load_table_gcs_json_append]
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.json'
# [END bigquery_load_table_gcs_json_append]

# Shared code
# [START bigquery_load_table_gcs_csv_append]
# [START bigquery_load_table_gcs_json_append]
load_job = client.load_table_from_uri(
uri,
table_ref,
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))

load_job.result() # Waits for table load to complete.
print('Job finished.')

destination_table = client.get_table(table_ref)
print('Loaded {} rows.'.format(destination_table.num_rows - previous_rows))
# [END bigquery_load_table_gcs_csv_append]
# [END bigquery_load_table_gcs_json_append]

out, _ = capsys.readouterr()
assert previous_rows == 1
assert 'Loaded 50 rows.' in out


def test_load_table_from_uri_truncate(client, to_delete, capsys):
"""Replaces table data with data from a GCS URI using various formats

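For reference, the append pattern that the removed snippets documented can be condensed into a single standalone sketch. This is a minimal example, not the sample the PR deletes, and it assumes the google-cloud-bigquery client library as used at the time of this change; the dataset name, table name, and URI are illustrative only.

    # Minimal sketch of appending rows to an existing table from a GCS URI.
    # Assumes 'my_dataset.existing_table' already exists with a compatible schema.
    from google.cloud import bigquery

    client = bigquery.Client()
    table_ref = client.dataset('my_dataset').table('existing_table')

    previous_rows = client.get_table(table_ref).num_rows

    job_config = bigquery.LoadJobConfig()
    # WRITE_APPEND adds the loaded rows to the table's existing data.
    job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
    job_config.skip_leading_rows = 1  # skip the CSV header row
    job_config.source_format = bigquery.SourceFormat.CSV

    uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.csv'

    load_job = client.load_table_from_uri(
        uri, table_ref, job_config=job_config)  # API request
    load_job.result()  # wait for the load job to complete

    destination_table = client.get_table(table_ref)
    print('Appended {} rows.'.format(destination_table.num_rows - previous_rows))

The same shape works for newline-delimited JSON by setting job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON and omitting skip_leading_rows, which is exactly the variation the removed test exercised.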