Skip to content

Commit

Permalink
Merge branch 'master' into googleapis#6---Bigtable-read_rows-no-deadline
Browse files Browse the repository at this point in the history
  • Loading branch information
mf2199 authored May 28, 2020
2 parents 2f69b38 + 9f4068c commit 9bd6386
Show file tree
Hide file tree
Showing 8 changed files with 43 additions and 32 deletions.
10 changes: 10 additions & 0 deletions .github/CODEOWNERS
Validating CODEOWNERS rules …
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Code owners file.
# This file controls who is tagged for review for any given pull request.
#
# For syntax help see:
# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax


# The bigtable-dpe team is the default owner for anything not
# explicitly taken by someone else.
* @googleapis/bigtable-dpe
7 changes: 7 additions & 0 deletions docs/index.rst
Original file line number Diff line number Diff line change
@@ -1,5 +1,12 @@
.. include:: README.rst

.. note::

Because this client uses :mod:`grpcio` library, it is safe to
share instances across threads. In multiprocessing scenarios, the best
practice is to create client instances *after* the invocation of
:func:`os.fork` by :class:`multiprocessing.Pool` or
:class:`multiprocessing.Process`.

Using the API
-------------
Expand Down
12 changes: 0 additions & 12 deletions google/cloud/bigtable/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -286,18 +286,6 @@ def update(self):
before calling :meth:`update`.
:type location: :str:``CreationOnly``
:param location: The location where this cluster's nodes and storage
reside. For best performance, clients should be located as
close as possible to this cluster. Currently only zones are
supported, so values should be of the form
``projects/<project>/locations/<zone>``.
:type serve_nodes: :int
:param serve_nodes: The number of nodes allocated to this cluster.
More nodes enable higher throughput and more consistent
performance.
:rtype: :class:`Operation`
:returns: The long-running operation corresponding to the
update operation.
Expand Down
3 changes: 0 additions & 3 deletions google/cloud/bigtable/instance.py
Original file line number Diff line number Diff line change
Expand Up @@ -537,9 +537,6 @@ def cluster(
:type cluster_id: str
:param cluster_id: The ID of the cluster.
:type instance: :class:`~google.cloud.bigtable.instance.Instance`
:param instance: The instance where the cluster resides.
:type location_id: str
:param location_id: (Creation Only) The location where this cluster's
nodes and storage reside. For best performance,
Expand Down
9 changes: 3 additions & 6 deletions google/cloud/bigtable/table.py
Original file line number Diff line number Diff line change
Expand Up @@ -359,7 +359,7 @@ def create(self, initial_split_keys=[], column_families={}):
into several tablets.
:type column_families: dict
:param column_failies: (Optional) A map columns to create. The key is
:param column_families: (Optional) A map columns to create. The key is
the column_id str and the value is a
:class:`GarbageCollectionRule`
"""
Expand Down Expand Up @@ -734,8 +734,8 @@ def drop_by_prefix(self, row_key_prefix, timeout=None):
:start-after: [START bigtable_drop_by_prefix]
:end-before: [END bigtable_drop_by_prefix]
:type row_prefix: bytes
:param row_prefix: Delete all rows that start with this row key
:type row_key_prefix: bytes
:param row_key_prefix: Delete all rows that start with this row key
prefix. Prefix cannot be zero length.
:type timeout: float
Expand Down Expand Up @@ -768,9 +768,6 @@ def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES
:start-after: [START bigtable_mutations_batcher]
:end-before: [END bigtable_mutations_batcher]
:type table: class
:param table: class:`~google.cloud.bigtable.table.Table`.
:type flush_count: int
:param flush_count: (Optional) Maximum number of rows per batch. If it
reaches the max number of rows it calls finish_batch() to
Expand Down
4 changes: 2 additions & 2 deletions noxfile.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ def default(session):
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=97",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
Expand Down Expand Up @@ -141,7 +141,7 @@ def docs(session):
"""Build the docs for this library."""

session.install("-e", ".")
session.install("sphinx", "alabaster", "recommonmark")
session.install("sphinx<3.0.0", "alabaster", "recommonmark")

shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
Expand Down
16 changes: 7 additions & 9 deletions synth.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,16 @@
import synthtool as s
from synthtool import gcp

gapic = gcp.GAPICGenerator()
gapic = gcp.GAPICBazel()
common = gcp.CommonTemplates()

# ----------------------------------------------------------------------------
# Generate bigtable and bigtable_admin GAPIC layer
# ----------------------------------------------------------------------------
library = gapic.py_library(
"bigtable",
"v2",
config_path="/google/bigtable/artman_bigtable.yaml",
artman_output_name="bigtable-v2",
service="bigtable",
version="v2",
bazel_target="//google/bigtable/v2:bigtable-v2-py",
include_protos=True,
)

Expand All @@ -36,10 +35,9 @@

# Generate admin client
library = gapic.py_library(
"bigtable_admin",
"v2",
config_path="/google/bigtable/admin/artman_bigtableadmin.yaml",
artman_output_name="bigtable-admin-v2",
service="bigtable_admin",
version="v2",
bazel_target="//google/bigtable/admin/v2:bigtable-admin-v2-py",
include_protos=True,
)

Expand Down
14 changes: 14 additions & 0 deletions tests/system.py
Original file line number Diff line number Diff line change
Expand Up @@ -657,6 +657,13 @@ def tearDown(self):
for table in self.tables_to_delete:
table.delete()

def _skip_if_emulated(self, message):
# NOTE: This method is necessary because ``Config.IN_EMULATOR``
# is set at runtime rather than import time, which means we
# can't use the @unittest.skipIf decorator.
if Config.IN_EMULATOR:
self.skipTest(message)

def test_list_tables(self):
# Since `Config.INSTANCE_DATA` is newly created in `setUpModule`, the
# table created in `setUpClass` here will be the only one.
Expand Down Expand Up @@ -691,6 +698,7 @@ def test_create_table(self):
self.assertEqual(sorted_tables, expected_tables)

def test_test_iam_permissions(self):
self._skip_if_emulated("Method not implemented in bigtable emulator")
temp_table_id = "test-test-iam-policy-table"
temp_table = Config.INSTANCE_DATA.table(temp_table_id)
temp_table.create()
Expand All @@ -701,6 +709,7 @@ def test_test_iam_permissions(self):
self.assertEqual(permissions, permissions_allowed)

def test_get_iam_policy(self):
self._skip_if_emulated("Method not implemented in bigtable emulator")
temp_table_id = "test-get-iam-policy-table"
temp_table = Config.INSTANCE_DATA.table(temp_table_id)
temp_table.create()
Expand All @@ -711,6 +720,7 @@ def test_get_iam_policy(self):
self.assertEqual(policy["version"], 0)

def test_set_iam_policy(self):
self._skip_if_emulated("Method not implemented in bigtable emulator")
temp_table_id = "test-set-iam-policy-table"
temp_table = Config.INSTANCE_DATA.table(temp_table_id)
temp_table.create()
Expand Down Expand Up @@ -742,6 +752,7 @@ def test_create_table_with_families(self):
self.assertEqual(retrieved_col_fam.gc_rule, gc_rule)

def test_create_table_with_split_keys(self):
self._skip_if_emulated("Split keys are not supported by Bigtable emulator")
temp_table_id = "foo-bar-baz-split-table"
initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"]
temp_table = Config.INSTANCE_DATA.table(temp_table_id)
Expand Down Expand Up @@ -1014,6 +1025,9 @@ def test_yield_rows_with_row_set(self):
self.assertEqual(found_row_keys, expected_row_keys)

def test_read_large_cell_limit(self):
self._maybe_emulator_skip(
"Maximum gRPC received message size for emulator is 4194304 bytes."
)
row = self._table.row(ROW_KEY)
self.rows_to_delete.append(row)

Expand Down

0 comments on commit 9bd6386

Please sign in to comment.