diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..e43d91c0b --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,10 @@ +# Code owners file. +# This file controls who is tagged for review for any given pull request. +# +# For syntax help see: +# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax + + +# The bigtable-dpe team is the default owner for anything not +# explicitly taken by someone else. +* @googleapis/bigtable-dpe diff --git a/docs/index.rst b/docs/index.rst index 8c76f79b8..88d8e09ec 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,5 +1,12 @@ .. include:: README.rst +.. note:: + + Because this client uses the :mod:`grpcio` library, it is safe to + share instances across threads. In multiprocessing scenarios, the best + practice is to create client instances *after* the invocation of + :func:`os.fork` by :class:`multiprocessing.Pool` or + :class:`multiprocessing.Process`. Using the API ------------- diff --git a/google/cloud/bigtable/cluster.py b/google/cloud/bigtable/cluster.py index 5ff1d0404..edb5d261b 100644 --- a/google/cloud/bigtable/cluster.py +++ b/google/cloud/bigtable/cluster.py @@ -286,18 +286,6 @@ def update(self): before calling :meth:`update`. - :type location: :str:``CreationOnly`` - :param location: The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form ``projects//locations/``. - :type serve_nodes: :int - :param serve_nodes: The number of nodes allocated to this cluster. More nodes enable higher throughput and more consistent performance. - :rtype: :class:`Operation` :returns: The long-running operation corresponding to the update operation. 
diff --git a/google/cloud/bigtable/instance.py b/google/cloud/bigtable/instance.py index dbdd20640..e0a30590b 100644 --- a/google/cloud/bigtable/instance.py +++ b/google/cloud/bigtable/instance.py @@ -537,9 +537,6 @@ def cluster( :type cluster_id: str :param cluster_id: The ID of the cluster. - :type instance: :class:`~google.cloud.bigtable.instance.Instance` - :param instance: The instance where the cluster resides. - :type location_id: str :param location_id: (Creation Only) The location where this cluster's nodes and storage reside. For best performance, diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py index 69379b21d..4852ff6e1 100644 --- a/google/cloud/bigtable/table.py +++ b/google/cloud/bigtable/table.py @@ -359,7 +359,7 @@ def create(self, initial_split_keys=[], column_families={}): into several tablets. :type column_families: dict - :param column_failies: (Optional) A map columns to create. The key is + :param column_families: (Optional) A map of columns to create. The key is the column_id str and the value is a :class:`GarbageCollectionRule` """ @@ -734,8 +734,8 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): :start-after: [START bigtable_drop_by_prefix] :end-before: [END bigtable_drop_by_prefix] - :type row_prefix: bytes - :param row_prefix: Delete all rows that start with this row key + :type row_key_prefix: bytes + :param row_key_prefix: Delete all rows that start with this row key prefix. Prefix cannot be zero length. :type timeout: float @@ -768,9 +768,6 @@ def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES :start-after: [START bigtable_mutations_batcher] :end-before: [END bigtable_mutations_batcher] - :type table: class - :param table: class:`~google.cloud.bigtable.table.Table`. - :type flush_count: int :param flush_count: (Optional) Maximum number of rows per batch. 
If it reaches the max number of rows it calls finish_batch() to diff --git a/noxfile.py b/noxfile.py index 915591589..1065894e6 100644 --- a/noxfile.py +++ b/noxfile.py @@ -79,7 +79,7 @@ def default(session): "--cov-append", "--cov-config=.coveragerc", "--cov-report=", - "--cov-fail-under=97", + "--cov-fail-under=0", os.path.join("tests", "unit"), *session.posargs, ) @@ -141,7 +141,7 @@ def docs(session): """Build the docs for this library.""" session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/synth.py b/synth.py index 32ebc4af2..22499ee05 100644 --- a/synth.py +++ b/synth.py @@ -17,17 +17,16 @@ import synthtool as s from synthtool import gcp -gapic = gcp.GAPICGenerator() +gapic = gcp.GAPICBazel() common = gcp.CommonTemplates() # ---------------------------------------------------------------------------- # Generate bigtable and bigtable_admin GAPIC layer # ---------------------------------------------------------------------------- library = gapic.py_library( - "bigtable", - "v2", - config_path="/google/bigtable/artman_bigtable.yaml", - artman_output_name="bigtable-v2", + service="bigtable", + version="v2", + bazel_target="//google/bigtable/v2:bigtable-v2-py", include_protos=True, ) @@ -36,10 +35,9 @@ # Generate admin client library = gapic.py_library( - "bigtable_admin", - "v2", - config_path="/google/bigtable/admin/artman_bigtableadmin.yaml", - artman_output_name="bigtable-admin-v2", + service="bigtable_admin", + version="v2", + bazel_target="//google/bigtable/admin/v2:bigtable-admin-v2-py", include_protos=True, ) diff --git a/tests/system.py b/tests/system.py index e9e3ab791..dd77dd936 100644 --- a/tests/system.py +++ b/tests/system.py @@ -657,6 +657,13 @@ def tearDown(self): for table in self.tables_to_delete: table.delete() + def _skip_if_emulated(self, message): + # NOTE: 
This method is necessary because ``Config.IN_EMULATOR`` + # is set at runtime rather than import time, which means we + # can't use the @unittest.skipIf decorator. + if Config.IN_EMULATOR: + self.skipTest(message) + def test_list_tables(self): # Since `Config.INSTANCE_DATA` is newly created in `setUpModule`, the # table created in `setUpClass` here will be the only one. @@ -691,6 +698,7 @@ def test_create_table(self): self.assertEqual(sorted_tables, expected_tables) def test_test_iam_permissions(self): + self._skip_if_emulated("Method not implemented in bigtable emulator") temp_table_id = "test-test-iam-policy-table" temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() @@ -701,6 +709,7 @@ def test_test_iam_permissions(self): self.assertEqual(permissions, permissions_allowed) def test_get_iam_policy(self): + self._skip_if_emulated("Method not implemented in bigtable emulator") temp_table_id = "test-get-iam-policy-table" temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() @@ -711,6 +720,7 @@ def test_get_iam_policy(self): self.assertEqual(policy["version"], 0) def test_set_iam_policy(self): + self._skip_if_emulated("Method not implemented in bigtable emulator") temp_table_id = "test-set-iam-policy-table" temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() @@ -742,6 +752,7 @@ def test_create_table_with_families(self): self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) def test_create_table_with_split_keys(self): + self._skip_if_emulated("Split keys are not supported by Bigtable emulator") temp_table_id = "foo-bar-baz-split-table" initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] temp_table = Config.INSTANCE_DATA.table(temp_table_id) @@ -1014,6 +1025,9 @@ def test_yield_rows_with_row_set(self): self.assertEqual(found_row_keys, expected_row_keys) def test_read_large_cell_limit(self): + self._maybe_emulator_skip( + "Maximum gRPC received message size for emulator is 4194304 
bytes." + ) row = self._table.row(ROW_KEY) self.rows_to_delete.append(row)