diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
deleted file mode 100644
index 9a7846675..000000000
--- a/.github/.OwlBot.lock.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-docker:
- image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
- digest: sha256:4a9e5d44b98e8672e2037ee22bc6b4f8e844a2d75fcb78ea8a4b38510112abc6
-# created: 2025-10-07
diff --git a/.github/.OwlBot.yaml b/.github/.OwlBot.yaml
deleted file mode 100644
index fe2f7841a..000000000
--- a/.github/.OwlBot.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2021 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-docker:
- image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
-
-deep-remove-regex:
- - /owl-bot-staging
-
-deep-copy-regex:
- - source: /google/bigtable/admin/(v.*)/.*-py/(.*)
- dest: /owl-bot-staging/bigtable_admin/$1/$2
- - source: /google/bigtable/(v.*)/.*-py/(.*)
- dest: /owl-bot-staging/bigtable/$1/$2
-
-begin-after-commit-hash: a21f1091413a260393548c1b2ac44b7347923f08
-
diff --git a/.github/auto-approve.yml b/.github/auto-approve.yml
deleted file mode 100644
index 311ebbb85..000000000
--- a/.github/auto-approve.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-# https://github.com/googleapis/repo-automation-bots/tree/main/packages/auto-approve
-processes:
- - "OwlBotTemplateChanges"
diff --git a/.github/release-please.yml b/.github/release-please.yml
deleted file mode 100644
index 593e83f9f..000000000
--- a/.github/release-please.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-releaseType: python
-handleGHRelease: true
-# NOTE: this section is generated by synthtool.languages.python
-# See https://github.com/googleapis/synthtool/blob/master/synthtool/languages/python.py
-manifest: true
-branches:
-- branch: v1
- handleGHRelease: true
- releaseType: python
-- branch: v0
- handleGHRelease: true
- releaseType: python
diff --git a/.github/release-trigger.yml b/.github/release-trigger.yml
deleted file mode 100644
index 0bbdd8e4c..000000000
--- a/.github/release-trigger.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-enabled: true
-multiScmName: python-bigtable
diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml
deleted file mode 100644
index 14e32d6fc..000000000
--- a/.github/sync-repo-settings.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-# Whether or not rebase-merging is enabled on this repository.
-# Defaults to `true`
-rebaseMergeAllowed: true
-
-# Whether or not squash-merging is enabled on this repository.
-# Defaults to `true`
-squashMergeAllowed: true
-
-# Whether or not PRs are merged with a merge commit on this repository.
-# Defaults to `false`
-mergeCommitAllowed: false
-
-# Rules for main branch protection
-branchProtectionRules:
-# Identifies the protection rule pattern. Name of the branch to be protected.
-# Defaults to `main`
-- pattern: main
- # Can admins overwrite branch protection.
- # Defaults to `true`
- isAdminEnforced: true
- # Number of approving reviews required to update matching branches.
- # Defaults to `1`
- requiredApprovingReviewCount: 1
- # Are reviews from code owners required to update matching branches.
- # Defaults to `false`
- requiresCodeOwnerReviews: true
- # Require up to date branches
- requiresStrictStatusChecks: false
- # List of required status check contexts that must pass for commits to be accepted to matching branches.
- requiredStatusCheckContexts:
- - 'Kokoro'
- - 'Kokoro system'
- - 'cla/google'
- - 'OwlBot Post Processor'
- - 'lint'
- - 'mypy'
- - 'docs'
- - 'docfx'
- - 'unit-3.9'
- - 'unit-3.10'
- - 'unit-3.11'
- - 'unit-3.12'
- - 'unit-3.13'
- - 'unit-3.14'
-# List of explicit permissions to add (additive only)
-permissionRules:
- # Team slug to add to repository permissions
- - team: yoshi-admins
- # Access level required, one of push|pull|admin|maintain|triage
- permission: admin
- # Team slug to add to repository permissions
- - team: yoshi-python-admins
- # Access level required, one of push|pull|admin|maintain|triage
- permission: admin
- # Team slug to add to repository permissions
- - team: yoshi-python
- # Access level required, one of push|pull|admin|maintain|triage
- permission: push
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 4866193af..9a0598202 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -12,7 +12,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: "3.8"
+ python-version: "3.13"
- name: Install nox
run: |
python -m pip install --upgrade setuptools pip wheel
diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
index d59bbb1b8..dad646c6b 100644
--- a/.github/workflows/unittest.yml
+++ b/.github/workflows/unittest.yml
@@ -45,7 +45,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: "3.8"
+ python-version: "3.13"
- name: Install coverage
run: |
python -m pip install --upgrade setuptools pip wheel
diff --git a/.librarian/generator-input/.repo-metadata.json b/.librarian/generator-input/.repo-metadata.json
new file mode 100644
index 000000000..9de4b5f92
--- /dev/null
+++ b/.librarian/generator-input/.repo-metadata.json
@@ -0,0 +1,80 @@
+{
+ "name": "bigtable",
+ "name_pretty": "Cloud Bigtable",
+ "product_documentation": "https://cloud.google.com/bigtable",
+ "client_documentation": "https://cloud.google.com/python/docs/reference/bigtable/latest",
+ "issue_tracker": "https://issuetracker.google.com/savedsearches/559777",
+ "release_level": "stable",
+ "language": "python",
+ "library_type": "GAPIC_COMBO",
+ "repo": "googleapis/python-bigtable",
+ "distribution_name": "google-cloud-bigtable",
+ "api_id": "bigtable.googleapis.com",
+ "requires_billing": true,
+ "samples": [
+ {
+ "name": "Hello World in Cloud Bigtable",
+ "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello",
+ "file": "main.py",
+ "runnable": true,
+ "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
+ "override_path": "hello"
+ },
+ {
+ "name": "Hello World using HappyBase",
+ "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase",
+ "file": "main.py",
+ "runnable": true,
+ "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
+ "override_path": "hello_happybase"
+ },
+ {
+ "name": "cbt Command Demonstration",
+ "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt",
+ "file": "instanceadmin.py",
+ "runnable": true,
+ "custom_content": "usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
+ "override_path": "instanceadmin"
+ },
+ {
+ "name": "Metric Scaler",
+ "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.",
+ "file": "metricscaler.py",
+ "runnable": true,
+ "custom_content": "usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] [--low_cpu_threshold LOW_CPU_THRESHOLD] [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] bigtable_instance bigtable_cluster
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
[--low_cpu_threshold LOW_CPU_THRESHOLD]
[--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
bigtable_instance bigtable_cluster
Scales Cloud Bigtable clusters based on CPU usage.
positional arguments:
bigtable_instance ID of the Cloud Bigtable instance to connect to.
bigtable_cluster ID of the Cloud Bigtable cluster to connect to.
optional arguments:
-h, --help show this help message and exit
--high_cpu_threshold HIGH_CPU_THRESHOLD
If Cloud Bigtable CPU usage is above this threshold,
scale up
--low_cpu_threshold LOW_CPU_THRESHOLD
If Cloud Bigtable CPU usage is below this threshold,
scale down
--short_sleep SHORT_SLEEP
How long to sleep in seconds between checking metrics
after no scale operation
--long_sleep LONG_SLEEP
How long to sleep in seconds between checking metrics
after a scaling operation
",
+ "override_path": "metricscaler"
+ },
+ {
+ "name": "Quickstart",
+ "description": "Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
+ "file": "main.py",
+ "runnable": true,
+ "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Existing table used in the quickstart. (default: my-table)
",
+ "override_path": "quickstart"
+ },
+    {
+      "name": "Quickstart using HappyBase",
+      "description": "Demonstrates use of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
+      "file": "main.py",
+      "runnable": true,
+      "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id\npositional arguments:\nproject_id Your Cloud Platform project ID.\ninstance_id ID of the Cloud Bigtable instance to connect to.\noptional arguments:\n-h, --help show this help message and exit\n--table TABLE Existing table used in the quickstart. (default: my-table)\n",
+      "override_path": "quickstart_happybase"
+    },
+    {
+      "name": "Table Admin",
+      "description": "Demonstrates how to connect to Cloud Bigtable and run basic table administration operations.",
+      "file": "tableadmin.py",
+      "runnable": true,
+      "custom_content": "usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id\nDemonstrates how to connect to Cloud Bigtable and run some basic operations.\nPrerequisites: - Create a Cloud Bigtable cluster.\nhttps://cloud.google.com/bigtable/docs/creating-cluster - Set your Google\nApplication Default Credentials.\nhttps://developers.google.com/identity/protocols/application-default-\ncredentials\npositional arguments:\nproject_id Your Cloud Platform project ID.\ninstance_id ID of the Cloud Bigtable instance to connect to.\noptional arguments:\n-h, --help show this help message and exit\n--table TABLE Table to create and destroy. (default: Hello-Bigtable)\n",
+      "override_path": "tableadmin"
+    }
+ ],
+ "default_version": "v2",
+ "codeowner_team": "@googleapis/api-bigtable @googleapis/api-bigtable-partners",
+ "api_shortname": "bigtable"
+}
diff --git a/owlbot.py b/.librarian/generator-input/librarian.py
similarity index 73%
rename from owlbot.py
rename to .librarian/generator-input/librarian.py
index b6b741b54..5b943d24b 100644
--- a/owlbot.py
+++ b/.librarian/generator-input/librarian.py
@@ -26,51 +26,6 @@
common = gcp.CommonTemplates()
-# This is a customized version of the s.get_staging_dirs() function from synthtool to
-# cater for copying 2 different folders from googleapis-gen
-# which are bigtable and bigtable/admin.
-# Source https://github.com/googleapis/synthtool/blob/master/synthtool/transforms.py#L280
-def get_staging_dirs(
- default_version: Optional[str] = None, sub_directory: Optional[str] = None
-) -> List[Path]:
- """Returns the list of directories, one per version, copied from
- https://github.com/googleapis/googleapis-gen. Will return in lexical sorting
- order with the exception of the default_version which will be last (if specified).
-
- Args:
- default_version (str): the default version of the API. The directory for this version
- will be the last item in the returned list if specified.
- sub_directory (str): if a `sub_directory` is provided, only the directories within the
- specified `sub_directory` will be returned.
-
- Returns: the empty list if no file were copied.
- """
-
- staging = Path("owl-bot-staging")
-
- if sub_directory:
- staging /= sub_directory
-
- if staging.is_dir():
- # Collect the subdirectories of the staging directory.
- versions = [v.name for v in staging.iterdir() if v.is_dir()]
- # Reorder the versions so the default version always comes last.
- versions = [v for v in versions if v != default_version]
- versions.sort()
- if default_version is not None:
- versions += [default_version]
- dirs = [staging / v for v in versions]
- for dir in dirs:
- s._tracked_paths.add(dir)
- return dirs
- else:
- return []
-
-# This library ships clients for two different APIs,
-# BigTable and BigTable Admin
-bigtable_default_version = "v2"
-bigtable_admin_default_version = "v2"
-
# These flags are needed because certain post-processing operations
# append things after a certain line of text, and can infinitely loop
# in a Github PR. We use these flags to only do those operations
@@ -80,16 +35,12 @@ def get_staging_dirs(
is_fresh_admin_v2_copy = False
is_fresh_admin_docs_copy = False
-for library in get_staging_dirs(bigtable_default_version, "bigtable"):
- s.move(library / "google/cloud/bigtable_v2", excludes=["**/gapic_version.py"])
- s.move(library / "tests")
- s.move(library / "scripts")
-
-for library in get_staging_dirs(bigtable_admin_default_version, "bigtable_admin"):
+for library in s.get_staging_dirs("v2"):
+ s.move(library / "google/cloud/bigtable_v2")
is_fresh_admin_copy = \
- s.move(library / "google/cloud/bigtable_admin", excludes=["**/gapic_version.py"])
+ s.move(library / "google/cloud/bigtable_admin")
is_fresh_admin_v2_copy = \
- s.move(library / "google/cloud/bigtable_admin_v2", excludes=["**/gapic_version.py"])
+ s.move(library / "google/cloud/bigtable_admin_v2")
s.move(library / "tests")
s.move(library / "samples")
s.move(library / "scripts")
@@ -111,10 +62,13 @@ def get_staging_dirs(
],
system_test_python_versions=["3.9"],
unit_test_python_versions=["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "3.14"],
+ default_python_version="3.13",
)
-s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml", "noxfile.py", "renovate.json"])
+s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/**", ".kokoro/**", "noxfile.py", "renovate.json"])
+
+s.shell.run(["nox", "-s", "blacken"], hide_output=False)
# ----------------------------------------------------------------------------
# Always supply app_profile_id in routing headers: https://github.com/googleapis/python-bigtable/pull/1109
@@ -130,16 +84,19 @@ def get_staging_dirs(
s.replace(
"tests/unit/gapic/bigtable_v2/test_bigtable.py",
'assert \(\n\s*gapic_v1\.routing_header\.to_grpc_metadata\(expected_headers\) in kw\["metadata"\]\n.*',
- """
- # assert the expected headers are present, in any order
- routing_string = next(iter([m[1] for m in kw["metadata"] if m[0] == 'x-goog-request-params']))
- assert all([f"{k}={v}" in routing_string for k,v in expected_headers.items()])
- """
+ """# assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])"""
)
s.replace(
"tests/unit/gapic/bigtable_v2/test_bigtable.py",
'expected_headers = {"name": "projects/sample1/instances/sample2"}',
- 'expected_headers = {"name": "projects/sample1/instances/sample2", "app_profile_id": ""}'
+ """expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }"""
)
s.replace(
"tests/unit/gapic/bigtable_v2/test_bigtable.py",
@@ -147,13 +104,13 @@ def get_staging_dirs(
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3"
}
- """,
+""",
"""
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
- "app_profile_id": ""
+ "app_profile_id": "",
}
- """
+"""
)
# ----------------------------------------------------------------------------
@@ -162,15 +119,6 @@ def get_staging_dirs(
python.py_samples(skip_readmes=True)
-s.replace(
- "samples/beam/noxfile.py",
- """INSTALL_LIBRARY_FROM_SOURCE \= os.environ.get\("INSTALL_LIBRARY_FROM_SOURCE", False\) in \(
- "True",
- "true",
-\)""",
- """# todo(kolea2): temporary workaround to install pinned dep version
-INSTALL_LIBRARY_FROM_SOURCE = False""")
-
# --------------------------------------------------------------------------
# Admin Overlay work
# --------------------------------------------------------------------------
@@ -188,9 +136,8 @@ def add_overlay_to_init_py(init_py_location, import_statements, should_add):
add_overlay_to_init_py(
"google/cloud/bigtable_admin_v2/__init__.py",
- """from .overlay import * # noqa: F403
-__all__ += overlay.__all__ # noqa: F405
-""",
+ """from .overlay import * # noqa: F403\n
+__all__ += overlay.__all__ # noqa: F405""",
is_fresh_admin_v2_copy,
)
@@ -199,8 +146,7 @@ def add_overlay_to_init_py(init_py_location, import_statements, should_add):
"""import google.cloud.bigtable_admin_v2.overlay # noqa: F401
from google.cloud.bigtable_admin_v2.overlay import * # noqa: F401, F403
-__all__ += google.cloud.bigtable_admin_v2.overlay.__all__
-""",
+__all__ += google.cloud.bigtable_admin_v2.overlay.__all__""",
is_fresh_admin_copy,
)
@@ -318,5 +264,3 @@ def add_overlay_to_init_py(init_py_location, import_statements, should_add):
r"class GcRule\(proto\.Message\)\:",
"class GcRule(oneof_message.OneofMessage):",
)
-
-s.shell.run(["nox", "-s", "blacken"], hide_output=False)
diff --git a/.librarian/generator-input/noxfile.py b/.librarian/generator-input/noxfile.py
new file mode 100644
index 000000000..16c8a6327
--- /dev/null
+++ b/.librarian/generator-input/noxfile.py
@@ -0,0 +1,569 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+
+from __future__ import absolute_import
+
+import os
+import pathlib
+import re
+import shutil
+from typing import Dict, List
+import warnings
+
+import nox
+
+FLAKE8_VERSION = "flake8==6.1.0"
+BLACK_VERSION = "black[jupyter]==23.3.0"
+ISORT_VERSION = "isort==5.11.0"
+LINT_PATHS = ["google", "tests", "noxfile.py", "setup.py"]
+
+DEFAULT_PYTHON_VERSION = "3.13"
+
+UNIT_TEST_PYTHON_VERSIONS: List[str] = [
+ "3.7",
+ "3.8",
+ "3.9",
+ "3.10",
+ "3.11",
+ "3.12",
+ "3.13",
+ "3.14",
+]
+UNIT_TEST_STANDARD_DEPENDENCIES = [
+ "mock",
+ "asyncmock",
+ "pytest",
+ "pytest-cov",
+ "pytest-asyncio",
+ BLACK_VERSION,
+ "autoflake",
+]
+UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = []
+UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = []
+UNIT_TEST_DEPENDENCIES: List[str] = []
+UNIT_TEST_EXTRAS: List[str] = []
+UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {}
+
+SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.9", "3.14"]
+SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [
+ "mock",
+ "pytest",
+ "google-cloud-testutils",
+]
+SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [
+ "pytest-asyncio==0.21.2",
+ BLACK_VERSION,
+ "pyyaml==6.0.2",
+]
+SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = []
+SYSTEM_TEST_DEPENDENCIES: List[str] = []
+SYSTEM_TEST_EXTRAS: List[str] = []
+SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {}
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
+nox.options.sessions = [
+ "unit-3.9",
+ "unit-3.10",
+ "unit-3.11",
+ "unit-3.12",
+ "unit-3.13",
+ "unit-3.14",
+ "system_emulated",
+ "system",
+ "mypy",
+ "cover",
+ "lint",
+ "lint_setup_py",
+ "blacken",
+ "docs",
+ "format",
+]
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def lint(session):
+ """Run linters.
+
+ Returns a failure if the linters find linting errors or sufficiently
+ serious code quality issues.
+ """
+ session.install(FLAKE8_VERSION, BLACK_VERSION)
+ session.run(
+ "black",
+ "--check",
+ *LINT_PATHS,
+ )
+ session.run("flake8", "google", "tests")
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def blacken(session):
+ """Run black. Format code to uniform standard."""
+ session.install(BLACK_VERSION)
+ session.run(
+ "black",
+ *LINT_PATHS,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def format(session):
+ """
+ Run isort to sort imports. Then run black
+ to format code to uniform standard.
+ """
+ session.install(BLACK_VERSION, ISORT_VERSION)
+ # Use the --fss option to sort imports using strict alphabetical order.
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
+ session.run(
+ "isort",
+ "--fss",
+ *LINT_PATHS,
+ )
+ session.run(
+ "black",
+ *LINT_PATHS,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def mypy(session):
+ """Verify type hints are mypy compatible."""
+ session.install("-e", ".")
+ session.install(
+ "mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests"
+ )
+ session.install("google-cloud-testutils")
+ session.run("mypy", "-p", "google.cloud.bigtable.data")
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def lint_setup_py(session):
+ """Verify that setup.py is valid (including RST check)."""
+ session.install("setuptools", "docutils", "pygments")
+ session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
+
+
+def install_unittest_dependencies(session, *constraints):
+ standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES
+ session.install(*standard_deps, *constraints)
+
+ if UNIT_TEST_EXTERNAL_DEPENDENCIES:
+ warnings.warn(
+ "'unit_test_external_dependencies' is deprecated. Instead, please "
+ "use 'unit_test_dependencies' or 'unit_test_local_dependencies'.",
+ DeprecationWarning,
+ )
+ session.install(*UNIT_TEST_EXTERNAL_DEPENDENCIES, *constraints)
+
+ if UNIT_TEST_LOCAL_DEPENDENCIES:
+ session.install(*UNIT_TEST_LOCAL_DEPENDENCIES, *constraints)
+
+ if UNIT_TEST_EXTRAS_BY_PYTHON:
+ extras = UNIT_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
+ elif UNIT_TEST_EXTRAS:
+ extras = UNIT_TEST_EXTRAS
+ else:
+ extras = []
+
+ if extras:
+ session.install("-e", f".[{','.join(extras)}]", *constraints)
+ else:
+ session.install("-e", ".", *constraints)
+
+
+@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
+@nox.parametrize(
+ "protobuf_implementation",
+ ["python", "upb", "cpp"],
+)
+def unit(session, protobuf_implementation):
+ # Install all test dependencies, then install this package in-place.
+ py_version = tuple([int(v) for v in session.python.split(".")])
+ if protobuf_implementation == "cpp" and py_version >= (3, 11):
+ session.skip("cpp implementation is not supported in python 3.11+")
+
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ install_unittest_dependencies(session, "-c", constraints_path)
+
+ # TODO(https://github.com/googleapis/synthtool/issues/1976):
+ # Remove the 'cpp' implementation once support for Protobuf 3.x is dropped.
+ # The 'cpp' implementation requires Protobuf<4.
+ if protobuf_implementation == "cpp":
+ session.install("protobuf<4")
+
+ # Run py.test against the unit tests.
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=unit_{session.python}_sponge_log.xml",
+ "--cov=google",
+ "--cov=tests/unit",
+ "--cov-append",
+ "--cov-config=.coveragerc",
+ "--cov-report=",
+ "--cov-fail-under=0",
+ os.path.join("tests", "unit"),
+ *session.posargs,
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+
+
+def install_systemtest_dependencies(session, *constraints):
+ # Use pre-release gRPC for system tests.
+ # Exclude version 1.52.0rc1 which has a known issue.
+ # See https://github.com/grpc/grpc/issues/32163
+ session.install("--pre", "grpcio!=1.52.0rc1")
+
+ session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_EXTERNAL_DEPENDENCIES:
+ session.install(*SYSTEM_TEST_EXTERNAL_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_LOCAL_DEPENDENCIES:
+ session.install("-e", *SYSTEM_TEST_LOCAL_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_DEPENDENCIES:
+ session.install("-e", *SYSTEM_TEST_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_EXTRAS_BY_PYTHON:
+ extras = SYSTEM_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
+ elif SYSTEM_TEST_EXTRAS:
+ extras = SYSTEM_TEST_EXTRAS
+ else:
+ extras = []
+
+ if extras:
+ session.install("-e", f".[{','.join(extras)}]", *constraints)
+ else:
+ session.install("-e", ".", *constraints)
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def system_emulated(session):
+ import subprocess
+ import signal
+
+ try:
+ subprocess.call(["gcloud", "--version"])
+ except OSError:
+ session.skip("gcloud not found but required for emulator support")
+
+ # Currently, CI/CD doesn't have beta component of gcloud.
+ subprocess.call(["gcloud", "components", "install", "beta", "bigtable"])
+
+ hostport = "localhost:8789"
+ session.env["BIGTABLE_EMULATOR_HOST"] = hostport
+
+ p = subprocess.Popen(
+ ["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport]
+ )
+
+ try:
+ system(session)
+ finally:
+ # Stop Emulator
+ os.killpg(os.getpgid(p.pid), signal.SIGKILL)
+
+
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+@nox.parametrize("client_type", ["async", "sync", "legacy"])
+def conformance(session, client_type):
+ # install dependencies
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ install_unittest_dependencies(session, "-c", constraints_path)
+ with session.chdir("test_proxy"):
+ # download the conformance test suite
+ session.run(
+ "bash",
+ "-e",
+ "run_tests.sh",
+ external=True,
+ env={"CLIENT_TYPE": client_type},
+ )
+
+
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+def system(session):
+ """Run the system test suite."""
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ system_test_path = os.path.join("tests", "system.py")
+ system_test_folder_path = os.path.join("tests", "system")
+
+ # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
+ if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
+ session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
+ # Install pyopenssl for mTLS testing.
+ if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
+ session.install("pyopenssl")
+
+ system_test_exists = os.path.exists(system_test_path)
+ system_test_folder_exists = os.path.exists(system_test_folder_path)
+ # Sanity check: only run tests if found.
+ if not system_test_exists and not system_test_folder_exists:
+ session.skip("System tests were not found")
+
+ install_systemtest_dependencies(session, "-c", constraints_path)
+
+ # Run py.test against the system tests.
+ if system_test_exists:
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_path,
+ *session.posargs,
+ )
+ if system_test_folder_exists:
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_folder_path,
+ *session.posargs,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def cover(session):
+ """Run the final coverage report.
+
+ This outputs the coverage report aggregating coverage from the unit
+ test runs (not system test runs), and then erases coverage data.
+ """
+ session.install("coverage", "pytest-cov")
+ session.run("coverage", "report", "--show-missing", "--fail-under=99")
+
+ session.run("coverage", "erase")
+
+
+@nox.session(python="3.10")
+def docs(session):
+ """Build the docs for this library."""
+
+ session.install("-e", ".")
+ session.install(
+ # We need to pin to specific versions of the `sphinxcontrib-*` packages
+ # which still support sphinx 4.x.
+ # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344
+ # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345.
+ "sphinxcontrib-applehelp==1.0.4",
+ "sphinxcontrib-devhelp==1.0.2",
+ "sphinxcontrib-htmlhelp==2.0.1",
+ "sphinxcontrib-qthelp==1.0.3",
+ "sphinxcontrib-serializinghtml==1.1.5",
+ "sphinx==4.5.0",
+ "alabaster",
+ "recommonmark",
+ )
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-W", # warnings as errors
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
+
+
+@nox.session(python="3.10")
+def docfx(session):
+ """Build the docfx yaml files for this library."""
+
+ session.install("-e", ".")
+ session.install(
+ # We need to pin to specific versions of the `sphinxcontrib-*` packages
+ # which still support sphinx 4.x.
+ # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344
+ # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345.
+ "sphinxcontrib-applehelp==1.0.4",
+ "sphinxcontrib-devhelp==1.0.2",
+ "sphinxcontrib-htmlhelp==2.0.1",
+ "sphinxcontrib-qthelp==1.0.3",
+ "sphinxcontrib-serializinghtml==1.1.5",
+ "gcp-sphinx-docfx-yaml",
+ "alabaster",
+ "recommonmark",
+ )
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-D",
+ (
+ "extensions=sphinx.ext.autodoc,"
+ "sphinx.ext.autosummary,"
+ "docfx_yaml.extension,"
+ "sphinx.ext.intersphinx,"
+ "sphinx.ext.coverage,"
+ "sphinx.ext.napoleon,"
+ "sphinx.ext.todo,"
+ "sphinx.ext.viewcode,"
+ "recommonmark"
+ ),
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
+ # Customization: Add extra sections to the table of contents for the Classic vs Async clients
+ session.install("pyyaml")
+ session.run("python", "docs/scripts/patch_devsite_toc.py")
+
+
+@nox.session(python="3.14")
+@nox.parametrize(
+ "protobuf_implementation",
+ ["python", "upb", "cpp"],
+)
+def prerelease_deps(session, protobuf_implementation):
+ """Run all tests with prerelease versions of dependencies installed."""
+
+ py_version = tuple([int(v) for v in session.python.split(".")])
+ if protobuf_implementation == "cpp" and py_version >= (3, 11):
+ session.skip("cpp implementation is not supported in python 3.11+")
+
+ # Install all dependencies
+ session.install("-e", ".[all, tests, tracing]")
+ unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES
+ session.install(*unit_deps_all)
+ system_deps_all = (
+ SYSTEM_TEST_STANDARD_DEPENDENCIES + SYSTEM_TEST_EXTERNAL_DEPENDENCIES
+ )
+ session.install(*system_deps_all)
+
+ # Because we test minimum dependency versions on the minimum Python
+ # version, the first version we test with in the unit tests sessions has a
+ # constraints file containing all dependencies and extras.
+ with open(
+ CURRENT_DIRECTORY
+ / "testing"
+ / f"constraints-{UNIT_TEST_PYTHON_VERSIONS[0]}.txt",
+ encoding="utf-8",
+ ) as constraints_file:
+ constraints_text = constraints_file.read()
+
+ # Ignore leading whitespace and comment lines.
+ constraints_deps = [
+ match.group(1)
+ for match in re.finditer(
+ r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE
+ )
+ ]
+
+ session.install(*constraints_deps)
+
+ prerel_deps = [
+ "protobuf",
+ # dependency of grpc
+ "six",
+ "grpc-google-iam-v1",
+ "googleapis-common-protos",
+ "grpcio",
+ "grpcio-status",
+ "google-api-core",
+ "google-auth",
+ "proto-plus",
+ "google-cloud-testutils",
+ # dependencies of google-cloud-testutils
+ "click",
+ ]
+
+ for dep in prerel_deps:
+ session.install("--pre", "--no-deps", "--upgrade", dep)
+
+ # Remaining dependencies
+ other_deps = [
+ "requests",
+ ]
+ session.install(*other_deps)
+
+ # Print out prerelease package versions
+ session.run(
+ "python", "-c", "import google.protobuf; print(google.protobuf.__version__)"
+ )
+ session.run("python", "-c", "import grpc; print(grpc.__version__)")
+ session.run("python", "-c", "import google.auth; print(google.auth.__version__)")
+
+ session.run(
+ "py.test",
+ "tests/unit",
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+
+ system_test_path = os.path.join("tests", "system.py")
+ system_test_folder_path = os.path.join("tests", "system")
+
+ # Only run system tests if found.
+ if os.path.exists(system_test_path):
+ session.run(
+ "py.test",
+ "--verbose",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_path,
+ *session.posargs,
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+ if os.path.exists(system_test_folder_path):
+ session.run(
+ "py.test",
+ "--verbose",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_folder_path,
+ *session.posargs,
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+
+
+@nox.session(python="3.10")
+def generate_sync(session):
+ """
+ Re-generate sync files for the library from CrossSync-annotated async source
+ """
+ session.install(BLACK_VERSION)
+ session.install("autoflake")
+ session.run("python", ".cross_sync/generate.py", ".")
diff --git a/.librarian/generator-input/setup.py b/.librarian/generator-input/setup.py
new file mode 100644
index 000000000..fd8062970
--- /dev/null
+++ b/.librarian/generator-input/setup.py
@@ -0,0 +1,100 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import os
+
+import setuptools
+
+
+package_root = os.path.abspath(os.path.dirname(__file__))
+
+# Package metadata.
+
+name = "google-cloud-bigtable"
+description = "Google Cloud Bigtable API client library"
+
+version = {}
+with open(os.path.join(package_root, "google/cloud/bigtable/gapic_version.py")) as fp:
+ exec(fp.read(), version)
+version = version["__version__"]
+
+
+# Should be one of:
+# 'Development Status :: 3 - Alpha'
+# 'Development Status :: 4 - Beta'
+# 'Development Status :: 5 - Production/Stable'
+release_status = "Development Status :: 5 - Production/Stable"
+dependencies = [
+ "google-api-core[grpc] >= 2.17.0, <3.0.0",
+ "google-cloud-core >= 1.4.4, <3.0.0",
+ "google-auth >= 2.23.0, <3.0.0,!=2.24.0,!=2.25.0",
+ "grpc-google-iam-v1 >= 0.12.4, <1.0.0",
+ "proto-plus >= 1.22.3, <2.0.0",
+ "proto-plus >= 1.25.0, <2.0.0; python_version>='3.13'",
+ "protobuf>=3.20.2,<7.0.0,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5",
+ "google-crc32c>=1.5.0, <2.0.0dev",
+]
+extras = {"libcst": "libcst >= 0.2.5"}
+
+
+# Setup boilerplate below this line.
+
+package_root = os.path.abspath(os.path.dirname(__file__))
+
+readme_filename = os.path.join(package_root, "README.rst")
+with io.open(readme_filename, encoding="utf-8") as readme_file:
+ readme = readme_file.read()
+
+# Only include packages under the 'google' namespace. Do not include tests,
+# benchmarks, etc.
+packages = [
+ package
+ for package in setuptools.find_namespace_packages()
+ if package.startswith("google")
+]
+
+setuptools.setup(
+ name=name,
+ version=version,
+ description=description,
+ long_description=readme,
+ author="Google LLC",
+ author_email="googleapis-packages@google.com",
+ license="Apache 2.0",
+ url="https://github.com/googleapis/python-bigtable",
+ classifiers=[
+ release_status,
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Apache Software License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
+ "Operating System :: OS Independent",
+ "Topic :: Internet",
+ ],
+ platforms="Posix; MacOS X; Windows",
+ packages=packages,
+ install_requires=dependencies,
+ extras_require=extras,
+ python_requires=">=3.7",
+ include_package_data=True,
+ zip_safe=False,
+)
diff --git a/.librarian/state.yaml b/.librarian/state.yaml
new file mode 100644
index 000000000..aca1e2fca
--- /dev/null
+++ b/.librarian/state.yaml
@@ -0,0 +1,40 @@
+image: us-central1-docker.pkg.dev/cloud-sdk-librarian-prod/images-prod/python-librarian-generator@sha256:b8058df4c45e9a6e07f6b4d65b458d0d059241dd34c814f151c8bf6b89211209
+libraries:
+ - id: google-cloud-bigtable
+ version: 2.35.0
+ last_generated_commit: a17b84add8318f780fcc8a027815d5fee644b9f7
+ apis:
+ - path: google/bigtable/v2
+ service_config: bigtable_v2.yaml
+ - path: google/bigtable/admin/v2
+ service_config: bigtableadmin_v2.yaml
+ source_roots:
+ - .
+ preserve_regex: []
+ remove_regex:
+ - ^.pre-commit-config.yaml
+ - ^.repo-metadata.json
+ - ^.trampolinerc
+ - ^docs/admin_client/bigtable
+ - ^docs/admin_client/services_.rst
+ - ^docs/admin_client/types_.rst
+ - ^docs/summary_overview.md
+ - ^google/cloud/bigtable_v2
+ - ^google/cloud/bigtable_admin/
+ - ^google/cloud/bigtable_admin_v2/services
+ - ^google/cloud/bigtable_admin_v2/types
+ - ^google/cloud/bigtable_admin_v2/__init__.py
+ - ^google/cloud/bigtable_admin_v2/gapic
+ - ^google/cloud/bigtable_admin_v2/py.typed
+ - ^samples/AUTHORING_GUIDE.md
+ - ^samples/CONTRIBUTING.md
+ - ^samples/generated_samples
+ - ^tests/unit/gapic
+ - ^noxfile.py
+ - ^scripts/fixup_bigtable
+ - ^setup.py
+ - ^SECURITY.md
+ - ^tests/__init__.py
+ - ^tests/unit/__init__.py
+ - ^tests/unit/gapic
+ tag_format: v{version}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
deleted file mode 100644
index 7887ba932..000000000
--- a/.release-please-manifest.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- ".": "2.34.0"
-}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2a0251dc1..cbb707694 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,26 @@
[1]: https://pypi.org/project/google-cloud-bigtable/#history
+## [2.35.0](https://github.com/googleapis/python-bigtable/compare/v2.34.0...v2.35.0) (2025-12-16)
+
+
+### Features
+
+* support mTLS certificates when available (#1249) ([ca20219cf45305de25dfb715f69dd63bce9981b7](https://github.com/googleapis/python-bigtable/commit/ca20219cf45305de25dfb715f69dd63bce9981b7))
+* add basic interceptor to client (#1206) ([6561cfac605ba7c5b3f750c3bdca9108e517ba77](https://github.com/googleapis/python-bigtable/commit/6561cfac605ba7c5b3f750c3bdca9108e517ba77))
+* add PeerInfo proto in Bigtable API ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350))
+* Add Type API updates needed to support structured keys in materialized views ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350))
+* Add encodings for STRUCT and the Timestamp type ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350))
+
+
+### Bug Fixes
+
+* async client uses fixed grace period (#1236) ([544db1cd7af876298b8637f495b6c7b2a0bcf16c](https://github.com/googleapis/python-bigtable/commit/544db1cd7af876298b8637f495b6c7b2a0bcf16c))
+* re-export AddToCell for consistency (#1241) ([2a5baf11d30dc383a7b48d5f43b6cbb6160782e3](https://github.com/googleapis/python-bigtable/commit/2a5baf11d30dc383a7b48d5f43b6cbb6160782e3))
+* retry cancelled errors (#1235) ([e3fd5d8668303db4ed35e9bf6be48b46954f9d67](https://github.com/googleapis/python-bigtable/commit/e3fd5d8668303db4ed35e9bf6be48b46954f9d67))
+* Add ReadRows/SampleRowKeys bindings for materialized views ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350))
+* Deprecate credentials_file argument ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350))
+
## [2.34.0](https://github.com/googleapis/python-bigtable/compare/v2.33.0...v2.34.0) (2025-10-16)
diff --git a/google/cloud/bigtable/data/__init__.py b/google/cloud/bigtable/data/__init__.py
index 9439f0f8d..c18eae683 100644
--- a/google/cloud/bigtable/data/__init__.py
+++ b/google/cloud/bigtable/data/__init__.py
@@ -31,6 +31,7 @@
from google.cloud.bigtable.data.mutations import Mutation
from google.cloud.bigtable.data.mutations import RowMutationEntry
+from google.cloud.bigtable.data.mutations import AddToCell
from google.cloud.bigtable.data.mutations import SetCell
from google.cloud.bigtable.data.mutations import DeleteRangeFromColumn
from google.cloud.bigtable.data.mutations import DeleteAllFromFamily
@@ -89,6 +90,7 @@
"RowRange",
"Mutation",
"RowMutationEntry",
+ "AddToCell",
"SetCell",
"DeleteRangeFromColumn",
"DeleteAllFromFamily",
diff --git a/google/cloud/bigtable/data/_async/client.py b/google/cloud/bigtable/data/_async/client.py
index 0af7154a6..54a410361 100644
--- a/google/cloud/bigtable/data/_async/client.py
+++ b/google/cloud/bigtable/data/_async/client.py
@@ -19,6 +19,7 @@
cast,
Any,
AsyncIterable,
+ Callable,
Optional,
Set,
Sequence,
@@ -58,6 +59,7 @@
from google.api_core.exceptions import DeadlineExceeded
from google.api_core.exceptions import ServiceUnavailable
from google.api_core.exceptions import Aborted
+from google.api_core.exceptions import Cancelled
from google.protobuf.message import Message
from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
@@ -99,18 +101,24 @@
)
from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE
from google.cloud.bigtable.data._async._swappable_channel import (
- AsyncSwappableChannel,
+ AsyncSwappableChannel as SwappableChannelType,
+ )
+ from google.cloud.bigtable.data._async.metrics_interceptor import (
+ AsyncBigtableMetricsInterceptor as MetricsInterceptorType,
)
else:
from typing import Iterable # noqa: F401
from grpc import insecure_channel
+ from grpc import intercept_channel
from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport as TransportType # type: ignore
from google.cloud.bigtable_v2.services.bigtable import BigtableClient as GapicClient # type: ignore
from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE
from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( # noqa: F401
- SwappableChannel,
+ SwappableChannel as SwappableChannelType,
+ )
+ from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( # noqa: F401
+ BigtableMetricsInterceptor as MetricsInterceptorType,
)
-
if TYPE_CHECKING:
from google.cloud.bigtable.data._helpers import RowKeySamples
@@ -205,7 +213,7 @@ def __init__(
credentials = google.auth.credentials.AnonymousCredentials()
if project is None:
project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT
-
+ self._metrics_interceptor = MetricsInterceptorType()
# initialize client
ClientWithProject.__init__(
self,
@@ -259,12 +267,11 @@ def __init__(
stacklevel=2,
)
- @CrossSync.convert(replace_symbols={"AsyncSwappableChannel": "SwappableChannel"})
- def _build_grpc_channel(self, *args, **kwargs) -> AsyncSwappableChannel:
+ def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannelType:
"""
This method is called by the gapic transport to create a grpc channel.
- The init arguments passed down are captured in a partial used by AsyncSwappableChannel
+ The init arguments passed down are captured in a partial used by SwappableChannel
to create new channel instances in the future, as part of the channel refresh logic
Emulators always use an inseucre channel
@@ -275,12 +282,30 @@ def _build_grpc_channel(self, *args, **kwargs) -> AsyncSwappableChannel:
Returns:
a custom wrapped swappable channel
"""
+ create_channel_fn: Callable[[], Channel]
if self._emulator_host is not None:
- # emulators use insecure channel
+ # Emulators use insecure channels
create_channel_fn = partial(insecure_channel, self._emulator_host)
- else:
+ elif CrossSync.is_async:
+ # For async client, use the default create_channel.
create_channel_fn = partial(TransportType.create_channel, *args, **kwargs)
- return AsyncSwappableChannel(create_channel_fn)
+ else:
+ # For sync client, wrap create_channel with interceptors.
+ def sync_create_channel_fn():
+ return intercept_channel(
+ TransportType.create_channel(*args, **kwargs),
+ self._metrics_interceptor,
+ )
+
+ create_channel_fn = sync_create_channel_fn
+
+ # Instantiate SwappableChannelType with the determined creation function.
+ new_channel = SwappableChannelType(create_channel_fn)
+ if CrossSync.is_async:
+ # Attach async interceptors to the channel instance itself.
+ new_channel._unary_unary_interceptors.append(self._metrics_interceptor)
+ new_channel._unary_stream_interceptors.append(self._metrics_interceptor)
+ return new_channel
@property
def universe_domain(self) -> str:
@@ -402,7 +427,7 @@ def _invalidate_channel_stubs(self):
self.transport._stubs = {}
self.transport._prep_wrapped_messages(self.client_info)
- @CrossSync.convert(replace_symbols={"AsyncSwappableChannel": "SwappableChannel"})
+ @CrossSync.convert
async def _manage_channel(
self,
refresh_interval_min: float = 60 * 35,
@@ -427,10 +452,10 @@ async def _manage_channel(
grace_period: time to allow previous channel to serve existing
requests before closing, in seconds
"""
- if not isinstance(self.transport.grpc_channel, AsyncSwappableChannel):
+ if not isinstance(self.transport.grpc_channel, SwappableChannelType):
warnings.warn("Channel does not support auto-refresh.")
return
- super_channel: AsyncSwappableChannel = self.transport.grpc_channel
+ super_channel: SwappableChannelType = self.transport.grpc_channel
first_refresh = self._channel_init_time + random.uniform(
refresh_interval_min, refresh_interval_max
)
@@ -456,12 +481,11 @@ async def _manage_channel(
old_channel = super_channel.swap_channel(new_channel)
self._invalidate_channel_stubs()
# give old_channel a chance to complete existing rpcs
- if CrossSync.is_async:
- await old_channel.close(grace_period)
- else:
- if grace_period:
- self._is_closed.wait(grace_period) # type: ignore
- old_channel.close() # type: ignore
+ if grace_period:
+ await CrossSync.event_wait(
+ self._is_closed, grace_period, async_break_early=False
+ )
+ await old_channel.close()
# subtract the time spent waiting for the channel to be replaced
next_refresh = random.uniform(refresh_interval_min, refresh_interval_max)
next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0)
@@ -914,6 +938,7 @@ def __init__(
DeadlineExceeded,
ServiceUnavailable,
Aborted,
+ Cancelled,
),
default_mutate_rows_retryable_errors: Sequence[type[Exception]] = (
DeadlineExceeded,
diff --git a/google/cloud/bigtable/data/_async/metrics_interceptor.py b/google/cloud/bigtable/data/_async/metrics_interceptor.py
new file mode 100644
index 000000000..a154c0083
--- /dev/null
+++ b/google/cloud/bigtable/data/_async/metrics_interceptor.py
@@ -0,0 +1,78 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if CrossSync.is_async:
+ from grpc.aio import UnaryUnaryClientInterceptor
+ from grpc.aio import UnaryStreamClientInterceptor
+else:
+ from grpc import UnaryUnaryClientInterceptor
+ from grpc import UnaryStreamClientInterceptor
+
+
+__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.metrics_interceptor"
+
+
+@CrossSync.convert_class(sync_name="BigtableMetricsInterceptor")
+class AsyncBigtableMetricsInterceptor(
+ UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor
+):
+ """
+ An async gRPC interceptor to add client metadata and print server metadata.
+ """
+
+ @CrossSync.convert
+ async def intercept_unary_unary(self, continuation, client_call_details, request):
+ """
+ Interceptor for unary rpcs:
+ - MutateRow
+ - CheckAndMutateRow
+ - ReadModifyWriteRow
+ """
+ try:
+ call = await continuation(client_call_details, request)
+ return call
+ except Exception as rpc_error:
+ raise rpc_error
+
+ @CrossSync.convert
+ async def intercept_unary_stream(self, continuation, client_call_details, request):
+ """
+ Interceptor for streaming rpcs:
+ - ReadRows
+ - MutateRows
+ - SampleRowKeys
+ """
+ try:
+ return self._streaming_generator_wrapper(
+ await continuation(client_call_details, request)
+ )
+ except Exception as rpc_error:
+ # handle errors while initializing stream
+ raise rpc_error
+
+ @staticmethod
+ @CrossSync.convert
+ async def _streaming_generator_wrapper(call):
+ """
+ Wrapped generator to be returned by intercept_unary_stream.
+ """
+ try:
+ async for response in call:
+ yield response
+ except Exception as e:
+ # handle errors while processing stream
+ raise e
diff --git a/google/cloud/bigtable/data/_sync_autogen/client.py b/google/cloud/bigtable/data/_sync_autogen/client.py
index adc849649..6a4da007a 100644
--- a/google/cloud/bigtable/data/_sync_autogen/client.py
+++ b/google/cloud/bigtable/data/_sync_autogen/client.py
@@ -17,7 +17,7 @@
# This file is automatically generated by CrossSync. Do not edit manually.
from __future__ import annotations
-from typing import cast, Any, Optional, Set, Sequence, TYPE_CHECKING
+from typing import cast, Any, Callable, Optional, Set, Sequence, TYPE_CHECKING
import abc
import time
import warnings
@@ -49,6 +49,7 @@
from google.api_core.exceptions import DeadlineExceeded
from google.api_core.exceptions import ServiceUnavailable
from google.api_core.exceptions import Aborted
+from google.api_core.exceptions import Cancelled
from google.protobuf.message import Message
from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
import google.auth.credentials
@@ -77,12 +78,18 @@
from google.cloud.bigtable.data._cross_sync import CrossSync
from typing import Iterable
from grpc import insecure_channel
+from grpc import intercept_channel
from google.cloud.bigtable_v2.services.bigtable.transports import (
BigtableGrpcTransport as TransportType,
)
from google.cloud.bigtable_v2.services.bigtable import BigtableClient as GapicClient
from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE
-from google.cloud.bigtable.data._sync_autogen._swappable_channel import SwappableChannel
+from google.cloud.bigtable.data._sync_autogen._swappable_channel import (
+ SwappableChannel as SwappableChannelType,
+)
+from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import (
+ BigtableMetricsInterceptor as MetricsInterceptorType,
+)
if TYPE_CHECKING:
from google.cloud.bigtable.data._helpers import RowKeySamples
@@ -145,6 +152,7 @@ def __init__(
credentials = google.auth.credentials.AnonymousCredentials()
if project is None:
project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+ self._metrics_interceptor = MetricsInterceptorType()
ClientWithProject.__init__(
self,
credentials=credentials,
@@ -188,7 +196,7 @@ def __init__(
stacklevel=2,
)
- def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannel:
+ def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannelType:
"""This method is called by the gapic transport to create a grpc channel.
The init arguments passed down are captured in a partial used by SwappableChannel
@@ -201,11 +209,20 @@ def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannel:
- **kwargs: keyword arguments passed by the gapic layer to create a new channel with
Returns:
a custom wrapped swappable channel"""
+ create_channel_fn: Callable[[], Channel]
if self._emulator_host is not None:
create_channel_fn = partial(insecure_channel, self._emulator_host)
else:
- create_channel_fn = partial(TransportType.create_channel, *args, **kwargs)
- return SwappableChannel(create_channel_fn)
+
+ def sync_create_channel_fn():
+ return intercept_channel(
+ TransportType.create_channel(*args, **kwargs),
+ self._metrics_interceptor,
+ )
+
+ create_channel_fn = sync_create_channel_fn
+ new_channel = SwappableChannelType(create_channel_fn)
+ return new_channel
@property
def universe_domain(self) -> str:
@@ -326,10 +343,10 @@ def _manage_channel(
between `refresh_interval_min` and `refresh_interval_max`
grace_period: time to allow previous channel to serve existing
requests before closing, in seconds"""
- if not isinstance(self.transport.grpc_channel, SwappableChannel):
+ if not isinstance(self.transport.grpc_channel, SwappableChannelType):
warnings.warn("Channel does not support auto-refresh.")
return
- super_channel: SwappableChannel = self.transport.grpc_channel
+ super_channel: SwappableChannelType = self.transport.grpc_channel
first_refresh = self._channel_init_time + random.uniform(
refresh_interval_min, refresh_interval_max
)
@@ -348,7 +365,9 @@ def _manage_channel(
old_channel = super_channel.swap_channel(new_channel)
self._invalidate_channel_stubs()
if grace_period:
- self._is_closed.wait(grace_period)
+ CrossSync._Sync_Impl.event_wait(
+ self._is_closed, grace_period, async_break_early=False
+ )
old_channel.close()
next_refresh = random.uniform(refresh_interval_min, refresh_interval_max)
next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0)
@@ -713,6 +732,7 @@ def __init__(
DeadlineExceeded,
ServiceUnavailable,
Aborted,
+ Cancelled,
),
default_mutate_rows_retryable_errors: Sequence[type[Exception]] = (
DeadlineExceeded,
diff --git a/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py b/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py
new file mode 100644
index 000000000..9e47313b0
--- /dev/null
+++ b/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py
@@ -0,0 +1,59 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from __future__ import annotations
+from grpc import UnaryUnaryClientInterceptor
+from grpc import UnaryStreamClientInterceptor
+
+
+class BigtableMetricsInterceptor(
+ UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor
+):
+ """
+ An async gRPC interceptor to add client metadata and print server metadata.
+ """
+
+ def intercept_unary_unary(self, continuation, client_call_details, request):
+ """Interceptor for unary rpcs:
+ - MutateRow
+ - CheckAndMutateRow
+ - ReadModifyWriteRow"""
+ try:
+ call = continuation(client_call_details, request)
+ return call
+ except Exception as rpc_error:
+ raise rpc_error
+
+ def intercept_unary_stream(self, continuation, client_call_details, request):
+ """Interceptor for streaming rpcs:
+ - ReadRows
+ - MutateRows
+ - SampleRowKeys"""
+ try:
+ return self._streaming_generator_wrapper(
+ continuation(client_call_details, request)
+ )
+ except Exception as rpc_error:
+ raise rpc_error
+
+ @staticmethod
+ def _streaming_generator_wrapper(call):
+ """Wrapped generator to be returned by intercept_unary_stream."""
+ try:
+ for response in call:
+ yield response
+ except Exception as e:
+ raise e
diff --git a/google/cloud/bigtable/gapic_version.py b/google/cloud/bigtable/gapic_version.py
index 4800b0559..a105a8349 100644
--- a/google/cloud/bigtable/gapic_version.py
+++ b/google/cloud/bigtable/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.34.0" # {x-release-please-version}
+__version__ = "2.35.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable_admin/gapic_version.py b/google/cloud/bigtable_admin/gapic_version.py
index 4800b0559..6d72a226d 100644
--- a/google/cloud/bigtable_admin/gapic_version.py
+++ b/google/cloud/bigtable_admin/gapic_version.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.34.0" # {x-release-please-version}
+__version__ = "2.35.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py
index 713b2408f..2102867f7 100644
--- a/google/cloud/bigtable_admin_v2/__init__.py
+++ b/google/cloud/bigtable_admin_v2/__init__.py
@@ -15,8 +15,18 @@
#
from google.cloud.bigtable_admin_v2 import gapic_version as package_version
+import google.api_core as api_core
+import sys
+
__version__ = package_version.__version__
+if sys.version_info >= (3, 8): # pragma: NO COVER
+ from importlib import metadata
+else: # pragma: NO COVER
+ # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove
+ # this code path once we drop support for Python 3.7
+ import importlib_metadata as metadata
+
from .services.bigtable_instance_admin import BigtableInstanceAdminClient
from .services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient
@@ -143,6 +153,100 @@
from .types.table import RestoreSourceType
from .types.types import Type
+if hasattr(api_core, "check_python_version") and hasattr(
+ api_core, "check_dependency_versions"
+): # pragma: NO COVER
+ api_core.check_python_version("google.cloud.bigtable_admin_v2") # type: ignore
+ api_core.check_dependency_versions("google.cloud.bigtable_admin_v2") # type: ignore
+else: # pragma: NO COVER
+ # An older version of api_core is installed which does not define the
+ # functions above. We do equivalent checks manually.
+ try:
+ import warnings
+ import sys
+
+ _py_version_str = sys.version.split()[0]
+ _package_label = "google.cloud.bigtable_admin_v2"
+ if sys.version_info < (3, 9):
+ warnings.warn(
+ "You are using a non-supported Python version "
+ + f"({_py_version_str}). Google will not post any further "
+ + f"updates to {_package_label} supporting this Python version. "
+ + "Please upgrade to the latest Python version, or at "
+ + f"least to Python 3.9, and then update {_package_label}.",
+ FutureWarning,
+ )
+ if sys.version_info[:2] == (3, 9):
+ warnings.warn(
+ f"You are using a Python version ({_py_version_str}) "
+ + f"which Google will stop supporting in {_package_label} in "
+ + "January 2026. Please "
+ + "upgrade to the latest Python version, or at "
+ + "least to Python 3.10, before then, and "
+ + f"then update {_package_label}.",
+ FutureWarning,
+ )
+
+ def parse_version_to_tuple(version_string: str):
+ """Safely converts a semantic version string to a comparable tuple of integers.
+ Example: "4.25.8" -> (4, 25, 8)
+ Ignores non-numeric parts and handles common version formats.
+ Args:
+ version_string: Version string in the format "x.y.z" or "x.y.z"
+ Returns:
+ Tuple of integers for the parsed version string.
+ """
+ parts = []
+ for part in version_string.split("."):
+ try:
+ parts.append(int(part))
+ except ValueError:
+ # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here.
+ # This is a simplification compared to 'packaging.parse_version', but sufficient
+ # for comparing strictly numeric semantic versions.
+ break
+ return tuple(parts)
+
+ def _get_version(dependency_name):
+ try:
+ version_string: str = metadata.version(dependency_name)
+ parsed_version = parse_version_to_tuple(version_string)
+ return (parsed_version, version_string)
+ except Exception:
+ # Catch exceptions from metadata.version() (e.g., PackageNotFoundError)
+ # or errors during parse_version_to_tuple
+ return (None, "--")
+
+ _dependency_package = "google.protobuf"
+ _next_supported_version = "4.25.8"
+ _next_supported_version_tuple = (4, 25, 8)
+ _recommendation = " (we recommend 6.x)"
+ (_version_used, _version_used_string) = _get_version(_dependency_package)
+ if _version_used and _version_used < _next_supported_version_tuple:
+ warnings.warn(
+ f"Package {_package_label} depends on "
+ + f"{_dependency_package}, currently installed at version "
+ + f"{_version_used_string}. Future updates to "
+ + f"{_package_label} will require {_dependency_package} at "
+ + f"version {_next_supported_version} or higher{_recommendation}."
+ + " Please ensure "
+ + "that either (a) your Python environment doesn't pin the "
+ + f"version of {_dependency_package}, so that updates to "
+ + f"{_package_label} can require the higher version, or "
+ + "(b) you manually update your Python environment to use at "
+ + f"least version {_next_supported_version} of "
+ + f"{_dependency_package}.",
+ FutureWarning,
+ )
+ except Exception:
+ warnings.warn(
+ "Could not determine the version of Python "
+ + "currently being used. To continue receiving "
+ + "updates for {_package_label}, ensure you are "
+ + "using a supported version of Python; see "
+ + "https://devguide.python.org/versions/"
+ )
+
__all__ = (
"BaseBigtableTableAdminAsyncClient",
"BigtableInstanceAdminAsyncClient",
diff --git a/google/cloud/bigtable_admin_v2/gapic_version.py b/google/cloud/bigtable_admin_v2/gapic_version.py
index 4800b0559..6d72a226d 100644
--- a/google/cloud/bigtable_admin_v2/gapic_version.py
+++ b/google/cloud/bigtable_admin_v2/gapic_version.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.34.0" # {x-release-please-version}
+__version__ = "2.35.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
index a1aee2370..632496543 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
@@ -2565,19 +2565,19 @@ async def sample_get_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -2704,19 +2704,19 @@ async def sample_set_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
index 84df01058..9d64108bb 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
@@ -161,6 +161,34 @@ def _get_default_mtls_endpoint(api_endpoint):
_DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}"
_DEFAULT_UNIVERSE = "googleapis.com"
+ @staticmethod
+ def _use_client_cert_effective():
+ """Returns whether client certificate should be used for mTLS if the
+ google-auth version supports should_use_client_cert automatic mTLS enablement.
+
+ Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var.
+
+ Returns:
+ bool: whether client certificate should be used for mTLS
+ Raises:
+ ValueError: (If using a version of google-auth without should_use_client_cert and
+ GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.)
+ """
+ # check if google-auth version supports should_use_client_cert for automatic mTLS enablement
+ if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER
+ return mtls.should_use_client_cert()
+ else: # pragma: NO COVER
+ # if unsupported, fallback to reading from env var
+ use_client_cert_str = os.getenv(
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
+ ).lower()
+ if use_client_cert_str not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
+ " either `true` or `false`"
+ )
+ return use_client_cert_str == "true"
+
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
@@ -503,12 +531,8 @@ def get_mtls_endpoint_and_cert_source(
)
if client_options is None:
client_options = client_options_lib.ClientOptions()
- use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_client_cert = BigtableInstanceAdminClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
@@ -516,7 +540,7 @@ def get_mtls_endpoint_and_cert_source(
# Figure out the client cert source to use.
client_cert_source = None
- if use_client_cert == "true":
+ if use_client_cert:
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
@@ -548,20 +572,14 @@ def _read_environment_variables():
google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
is not any of ["auto", "never", "always"].
"""
- use_client_cert = os.getenv(
- "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
- ).lower()
+ use_client_cert = BigtableInstanceAdminClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
+ return use_client_cert, use_mtls_endpoint, universe_domain_env
@staticmethod
def _get_client_cert_source(provided_cert_source, use_cert_flag):
@@ -3067,19 +3085,19 @@ def sample_get_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -3207,19 +3225,19 @@ def sample_set_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
index f5ceeeb68..3a05dd663 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
@@ -81,9 +81,10 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
index a294144ef..d5d5cf1e5 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
@@ -160,9 +160,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if a ``channel`` instance is provided.
channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
@@ -296,9 +297,10 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
index aae0f44c4..7ce762764 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
@@ -157,8 +157,9 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -209,9 +210,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py
index 12af0792b..9879c4c45 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py
@@ -1719,9 +1719,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if ``channel`` is provided. This argument will be
+ removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
index d79d1b088..7f772c87c 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
@@ -936,14 +936,14 @@ async def sample_update_table():
specifying which fields (e.g. ``change_stream_config``)
in the ``table`` field should be updated. This mask is
relative to the ``table`` field, not to the request
- message. The wildcard (*) path is currently not
+ message. The wildcard (\*) path is currently not
supported. Currently UpdateTable is only supported for
the following fields:
- - ``change_stream_config``
- - ``change_stream_config.retention_period``
- - ``deletion_protection``
- - ``row_key_schema``
+ - ``change_stream_config``
+ - ``change_stream_config.retention_period``
+ - ``deletion_protection``
+ - ``row_key_schema``
If ``column_families`` is set in ``update_mask``, it
will return an UNIMPLEMENTED error.
@@ -3044,7 +3044,7 @@ async def sample_create_backup():
full backup name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in
- length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -3293,7 +3293,7 @@ async def sample_update_backup():
required. Other fields are ignored. Update is only
supported for the following fields:
- - ``backup.expire_time``.
+ - ``backup.expire_time``.
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -3784,7 +3784,7 @@ async def sample_copy_backup():
name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in
- length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -3971,19 +3971,19 @@ async def sample_get_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -4110,19 +4110,19 @@ async def sample_set_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
index d0030af92..ce251db7d 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
@@ -162,6 +162,34 @@ def _get_default_mtls_endpoint(api_endpoint):
_DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}"
_DEFAULT_UNIVERSE = "googleapis.com"
+ @staticmethod
+ def _use_client_cert_effective():
+ """Returns whether client certificate should be used for mTLS if the
+ google-auth version supports should_use_client_cert automatic mTLS enablement.
+
+ Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var.
+
+ Returns:
+ bool: whether client certificate should be used for mTLS
+ Raises:
+ ValueError: (If using a version of google-auth without should_use_client_cert and
+ GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.)
+ """
+ # check if google-auth version supports should_use_client_cert for automatic mTLS enablement
+ if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER
+ return mtls.should_use_client_cert()
+ else: # pragma: NO COVER
+ # if unsupported, fallback to reading from env var
+ use_client_cert_str = os.getenv(
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
+ ).lower()
+ if use_client_cert_str not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
+ " either `true` or `false`"
+ )
+ return use_client_cert_str == "true"
+
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
@@ -510,12 +538,8 @@ def get_mtls_endpoint_and_cert_source(
)
if client_options is None:
client_options = client_options_lib.ClientOptions()
- use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_client_cert = BaseBigtableTableAdminClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
@@ -523,7 +547,7 @@ def get_mtls_endpoint_and_cert_source(
# Figure out the client cert source to use.
client_cert_source = None
- if use_client_cert == "true":
+ if use_client_cert:
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
@@ -555,20 +579,14 @@ def _read_environment_variables():
google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
is not any of ["auto", "never", "always"].
"""
- use_client_cert = os.getenv(
- "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
- ).lower()
+ use_client_cert = BaseBigtableTableAdminClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
+ return use_client_cert, use_mtls_endpoint, universe_domain_env
@staticmethod
def _get_client_cert_source(provided_cert_source, use_cert_flag):
@@ -1486,14 +1504,14 @@ def sample_update_table():
specifying which fields (e.g. ``change_stream_config``)
in the ``table`` field should be updated. This mask is
relative to the ``table`` field, not to the request
- message. The wildcard (*) path is currently not
+ message. The wildcard (\*) path is currently not
supported. Currently UpdateTable is only supported for
the following fields:
- - ``change_stream_config``
- - ``change_stream_config.retention_period``
- - ``deletion_protection``
- - ``row_key_schema``
+ - ``change_stream_config``
+ - ``change_stream_config.retention_period``
+ - ``deletion_protection``
+ - ``row_key_schema``
If ``column_families`` is set in ``update_mask``, it
will return an UNIMPLEMENTED error.
@@ -3549,7 +3567,7 @@ def sample_create_backup():
full backup name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in
- length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -3792,7 +3810,7 @@ def sample_update_backup():
required. Other fields are ignored. Update is only
supported for the following fields:
- - ``backup.expire_time``.
+ - ``backup.expire_time``.
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -4272,7 +4290,7 @@ def sample_copy_backup():
name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in
- length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -4456,19 +4474,19 @@ def sample_get_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -4596,19 +4614,19 @@ def sample_set_iam_policy():
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
index 8e2cb7304..8ad08df3f 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
@@ -81,9 +81,10 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
index 5f46c3aa3..f8d1058c8 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
@@ -162,9 +162,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if a ``channel`` instance is provided.
channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
@@ -298,9 +299,10 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
index 159a96eda..5017f17d0 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
@@ -159,8 +159,9 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -211,9 +212,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py
index ec2462d4a..6c3815f79 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py
@@ -1896,9 +1896,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if ``channel`` is provided. This argument will be
+ removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
index d6403fc2a..69de07a2a 100644
--- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
+++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
@@ -235,20 +235,20 @@ class CreateTableRequest(proto.Message):
Example:
- - Row keys :=
- ``["a", "apple", "custom", "customer_1", "customer_2",``
- ``"other", "zz"]``
- - initial_split_keys :=
- ``["apple", "customer_1", "customer_2", "other"]``
- - Key assignment:
-
- - Tablet 1 ``[, apple) => {"a"}.``
- - Tablet 2
- ``[apple, customer_1) => {"apple", "custom"}.``
- - Tablet 3
- ``[customer_1, customer_2) => {"customer_1"}.``
- - Tablet 4 ``[customer_2, other) => {"customer_2"}.``
- - Tablet 5 ``[other, ) => {"other", "zz"}.``
+ - Row keys :=
+ ``["a", "apple", "custom", "customer_1", "customer_2",``
+ ``"other", "zz"]``
+ - initial_split_keys :=
+ ``["apple", "customer_1", "customer_2", "other"]``
+ - Key assignment:
+
+ - Tablet 1 ``[, apple) => {"a"}.``
+ - Tablet 2
+ ``[apple, customer_1) => {"apple", "custom"}.``
+ - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.``
+ - Tablet 4 ``[customer_2, other) => {"customer_2"}.``
+ - Tablet 5
+ ``[other, ) => {"other", "zz"}.``
"""
class Split(proto.Message):
@@ -482,13 +482,13 @@ class UpdateTableRequest(proto.Message):
which fields (e.g. ``change_stream_config``) in the
``table`` field should be updated. This mask is relative to
the ``table`` field, not to the request message. The
- wildcard (*) path is currently not supported. Currently
+ wildcard (\*) path is currently not supported. Currently
UpdateTable is only supported for the following fields:
- - ``change_stream_config``
- - ``change_stream_config.retention_period``
- - ``deletion_protection``
- - ``row_key_schema``
+ - ``change_stream_config``
+ - ``change_stream_config.retention_period``
+ - ``deletion_protection``
+ - ``row_key_schema``
If ``column_families`` is set in ``update_mask``, it will
return an UNIMPLEMENTED error.
@@ -1099,7 +1099,7 @@ class CreateBackupRequest(proto.Message):
name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in length
- and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
backup (google.cloud.bigtable_admin_v2.types.Backup):
Required. The backup to create.
"""
@@ -1167,7 +1167,7 @@ class UpdateBackupRequest(proto.Message):
required. Other fields are ignored. Update is only supported
for the following fields:
- - ``backup.expire_time``.
+ - ``backup.expire_time``.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. A mask specifying which fields (e.g.
``expire_time``) in the Backup resource should be updated.
@@ -1246,16 +1246,16 @@ class ListBackupsRequest(proto.Message):
The fields eligible for filtering are:
- - ``name``
- - ``source_table``
- - ``state``
- - ``start_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``end_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``expire_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``size_bytes``
+ - ``name``
+ - ``source_table``
+ - ``state``
+ - ``start_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``end_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``expire_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``size_bytes``
To filter on multiple expressions, provide each separate
expression within parentheses. By default, each expression
@@ -1264,20 +1264,20 @@ class ListBackupsRequest(proto.Message):
Some examples of using filters are:
- - ``name:"exact"`` --> The backup's name is the string
- "exact".
- - ``name:howl`` --> The backup's name contains the string
- "howl".
- - ``source_table:prod`` --> The source_table's name
- contains the string "prod".
- - ``state:CREATING`` --> The backup is pending creation.
- - ``state:READY`` --> The backup is fully created and ready
- for use.
- - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")``
- --> The backup name contains the string "howl" and
- start_time of the backup is before 2018-03-28T14:50:00Z.
- - ``size_bytes > 10000000000`` --> The backup's size is
- greater than 10GB
+ - ``name:"exact"`` --> The backup's name is the string
+ "exact".
+ - ``name:howl`` --> The backup's name contains the string
+ "howl".
+ - ``source_table:prod`` --> The source_table's name contains
+ the string "prod".
+ - ``state:CREATING`` --> The backup is pending creation.
+ - ``state:READY`` --> The backup is fully created and ready
+ for use.
+ - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")``
+ --> The backup name contains the string "howl" and
+ start_time of the backup is before 2018-03-28T14:50:00Z.
+ - ``size_bytes > 10000000000`` --> The backup's size is
+ greater than 10GB
order_by (str):
An expression for specifying the sort order of the results
of the request. The string value should specify one or more
@@ -1286,13 +1286,13 @@ class ListBackupsRequest(proto.Message):
Fields supported are:
- - name
- - source_table
- - expire_time
- - start_time
- - end_time
- - size_bytes
- - state
+ - name
+ - source_table
+ - expire_time
+ - start_time
+ - end_time
+ - size_bytes
+ - state
For example, "start_time". The default sorting order is
ascending. To specify descending order for the field, a
@@ -1381,7 +1381,7 @@ class CopyBackupRequest(proto.Message):
to create the full backup name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in length
- and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
source_backup (str):
Required. The source backup to be copied from. The source
backup needs to be in READY state for it to be copied.
diff --git a/google/cloud/bigtable_admin_v2/types/instance.py b/google/cloud/bigtable_admin_v2/types/instance.py
index 865487f0d..f07414d56 100644
--- a/google/cloud/bigtable_admin_v2/types/instance.py
+++ b/google/cloud/bigtable_admin_v2/types/instance.py
@@ -67,15 +67,15 @@ class Instance(proto.Message):
customer's organizational needs and deployment strategies.
They can be used to filter resources and aggregate metrics.
- - Label keys must be between 1 and 63 characters long and
- must conform to the regular expression:
- ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``.
- - Label values must be between 0 and 63 characters long and
- must conform to the regular expression:
- ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``.
- - No more than 64 labels can be associated with a given
- resource.
- - Keys and values must both be under 128 bytes.
+ - Label keys must be between 1 and 63 characters long and
+ must conform to the regular expression:
+ ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``.
+ - Label values must be between 0 and 63 characters long and
+ must conform to the regular expression:
+ ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``.
+ - No more than 64 labels can be associated with a given
+ resource.
+ - Keys and values must both be under 128 bytes.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. A commit timestamp representing when this
Instance was created. For instances created before this
diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py
index c15eac799..f6d1fe729 100644
--- a/google/cloud/bigtable_admin_v2/types/table.py
+++ b/google/cloud/bigtable_admin_v2/types/table.py
@@ -154,9 +154,9 @@ class Table(proto.Message):
i.e. deleting the following resources through Admin APIs are
prohibited:
- - The table.
- - The column families in the table.
- - The instance containing the table.
+ - The table.
+ - The column families in the table.
+ - The instance containing the table.
Note one can still delete the data stored in the table
through Data APIs.
@@ -181,37 +181,22 @@ class Table(proto.Message):
they encounter an invalid row key.
For example, if \_key =
- "some_id#2024-04-30#\x00\x13\x00\xf3" with the following
- schema:
-
- .. code-block::
-
- {
- fields {
- field_name: "id"
- type { string { encoding: utf8_bytes {} } }
- }
- fields {
- field_name: "date"
- type { string { encoding: utf8_bytes {} } }
- }
- fields {
- field_name: "product_code"
- type { int64 { encoding: big_endian_bytes {} } }
- }
- encoding { delimited_bytes { delimiter: "#" } }
- }
-
- The decoded key parts would be:
- id = "some_id", date = "2024-04-30", product_code = 1245427
- The query "SELECT \_key, product_code FROM table" will return
- two columns:
-
- +========================================+==============+
- | \_key | product_code |
- +========================================+==============+
- | "some_id#2024-04-30#\x00\x13\x00\xf3" | 1245427 |
- +----------------------------------------+--------------+
+ "some_id#2024-04-30#\\x00\\x13\\x00\\xf3" with the following
+ schema: { fields { field_name: "id" type { string {
+ encoding: utf8_bytes {} } } } fields { field_name: "date"
+ type { string { encoding: utf8_bytes {} } } } fields {
+ field_name: "product_code" type { int64 { encoding:
+ big_endian_bytes {} } } } encoding { delimited_bytes {
+ delimiter: "#" } } }
+
+ | The decoded key parts would be: id = "some_id", date =
+ "2024-04-30", product_code = 1245427 The query "SELECT
+ \_key, product_code FROM table" will return two columns:
+ /------------------------------------------------------
+ | \| \_key \| product_code \| \|
+ --------------------------------------\|--------------\|
+ \| "some_id#2024-04-30#\\x00\\x13\\x00\\xf3" \| 1245427 \|
+ ------------------------------------------------------/
The schema has the following invariants: (1) The decoded
field values are order-preserved. For read, the field values
@@ -221,19 +206,19 @@ class Table(proto.Message):
type is limited to scalar types only: Array, Map, Aggregate,
and Struct are not allowed. (4) The field names must not
collide with existing column family names and reserved
- keywords "_key" and "_timestamp".
+ keywords "\_key" and "\_timestamp".
The following update operations are allowed for
row_key_schema:
- - Update from an empty schema to a new schema.
- - Remove the existing schema. This operation requires
- setting the ``ignore_warnings`` flag to ``true``, since
- it might be a backward incompatible change. Without the
- flag, the update request will fail with an
- INVALID_ARGUMENT error. Any other row key schema update
- operation (e.g. update existing schema columns names or
- types) is currently unsupported.
+ - Update from an empty schema to a new schema.
+ - Remove the existing schema. This operation requires
+ setting the ``ignore_warnings`` flag to ``true``, since it
+ might be a backward incompatible change. Without the flag,
+ the update request will fail with an INVALID_ARGUMENT
+ error. Any other row key schema update operation (e.g.
+ update existing schema columns names or types) is
+ currently unsupported.
"""
class TimestampGranularity(proto.Enum):
@@ -572,7 +557,7 @@ class ColumnFamily(proto.Message):
If ``value_type`` is ``Aggregate``, written data must be
compatible with:
- - ``value_type.input_type`` for ``AddInput`` mutations
+ - ``value_type.input_type`` for ``AddInput`` mutations
"""
gc_rule: "GcRule" = proto.Field(
@@ -864,8 +849,8 @@ class Backup(proto.Message):
backup or updating its ``expire_time``, the value must be
greater than the backup creation time by:
- - At least 6 hours
- - At most 90 days
+ - At least 6 hours
+ - At most 90 days
Once the ``expire_time`` has passed, Cloud Bigtable will
delete the backup.
@@ -895,7 +880,7 @@ class Backup(proto.Message):
standard backup. This value must be greater than the backup
creation time by:
- - At least 24 hours
+ - At least 24 hours
This field only applies for hot backups. When creating or
updating a standard backup, attempting to set this field
diff --git a/google/cloud/bigtable_admin_v2/types/types.py b/google/cloud/bigtable_admin_v2/types/types.py
index b6ea5341d..4f56429da 100644
--- a/google/cloud/bigtable_admin_v2/types/types.py
+++ b/google/cloud/bigtable_admin_v2/types/types.py
@@ -40,15 +40,15 @@ class Type(proto.Message):
Each encoding can operate in one of two modes:
- - Sorted: In this mode, Bigtable guarantees that
- ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is
- useful anywhere sort order is important, for example when
- encoding keys.
- - Distinct: In this mode, Bigtable guarantees that if ``X != Y``
- then ``Encode(X) != Encode(Y)``. However, the converse is not
- guaranteed. For example, both "{'foo': '1', 'bar': '2'}" and
- "{'bar': '2', 'foo': '1'}" are valid encodings of the same JSON
- value.
+ - Sorted: In this mode, Bigtable guarantees that
+ ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is
+ useful anywhere sort order is important, for example when encoding
+ keys.
+ - Distinct: In this mode, Bigtable guarantees that if ``X != Y``
+ then ``Encode(X) != Encode(Y)``. However, the converse is not
+ guaranteed. For example, both "{'foo': '1', 'bar': '2'}" and
+ "{'bar': '2', 'foo': '1'}" are valid encodings of the same JSON
+ value.
The API clearly documents which mode is used wherever an encoding
can be configured. Each encoding also documents which values are
@@ -205,16 +205,16 @@ class Utf8Bytes(proto.Message):
Sorted mode:
- - All values are supported.
- - Code point order is preserved.
+ - All values are supported.
+ - Code point order is preserved.
Distinct mode: all values are supported.
Compatible with:
- - BigQuery ``TEXT`` encoding
- - HBase ``Bytes.toBytes``
- - Java ``String#getBytes(StandardCharsets.UTF_8)``
+ - BigQuery ``TEXT`` encoding
+ - HBase ``Bytes.toBytes``
+ - Java ``String#getBytes(StandardCharsets.UTF_8)``
"""
@@ -276,9 +276,9 @@ class BigEndianBytes(proto.Message):
Compatible with:
- - BigQuery ``BINARY`` encoding
- - HBase ``Bytes.toBytes``
- - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN``
+ - BigQuery ``BINARY`` encoding
+ - HBase ``Bytes.toBytes``
+ - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN``
Attributes:
bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes):
@@ -358,7 +358,7 @@ class Encoding(proto.Message):
Compatible with:
- - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS``
+ - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS``
This field is a member of `oneof`_ ``encoding``.
"""
@@ -455,17 +455,17 @@ class DelimitedBytes(proto.Message):
Sorted mode:
- - Fields are encoded in sorted mode.
- - Encoded field values must not contain any bytes <=
- ``delimiter[0]``
- - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
- if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
- first.
+ - Fields are encoded in sorted mode.
+ - Encoded field values must not contain any bytes <=
+ ``delimiter[0]``
+ - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
+ if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
+ first.
Distinct mode:
- - Fields are encoded in distinct mode.
- - Encoded field values must not contain ``delimiter[0]``.
+ - Fields are encoded in distinct mode.
+ - Encoded field values must not contain ``delimiter[0]``.
Attributes:
delimiter (bytes):
@@ -488,24 +488,23 @@ class OrderedCodeBytes(proto.Message):
Fields that encode to the empty string "" have special handling:
- - If *every* field encodes to "", or if the STRUCT has no fields
- defined, then the STRUCT is encoded as the fixed byte pair {0x00,
- 0x00}.
- - Otherwise, the STRUCT only encodes until the last non-empty
- field, omitting any trailing empty fields. Any empty fields that
- aren't omitted are replaced with the fixed byte pair {0x00,
- 0x00}.
+ - If *every* field encodes to "", or if the STRUCT has no fields
+ defined, then the STRUCT is encoded as the fixed byte pair {0x00,
+ 0x00}.
+ - Otherwise, the STRUCT only encodes until the last non-empty field,
+ omitting any trailing empty fields. Any empty fields that aren't
+ omitted are replaced with the fixed byte pair {0x00, 0x00}.
Examples:
- - STRUCT() -> "\00\00"
- - STRUCT("") -> "\00\00"
- - STRUCT("", "") -> "\00\00"
- - STRUCT("", "B") -> "\00\00" + "\00\01" + "B"
- - STRUCT("A", "") -> "A"
- - STRUCT("", "B", "") -> "\00\00" + "\00\01" + "B"
- - STRUCT("A", "", "C") -> "A" + "\00\01" + "\00\00" + "\00\01" +
- "C"
+ - STRUCT() -> "\\00\\00"
+ - STRUCT("") -> "\\00\\00"
+ - STRUCT("", "") -> "\\00\\00"
+ - STRUCT("", "B") -> "\\00\\00" + "\\00\\01" + "B"
+ - STRUCT("A", "") -> "A"
+ - STRUCT("", "B", "") -> "\\00\\00" + "\\00\\01" + "B"
+ - STRUCT("A", "", "C") -> "A" + "\\00\\01" + "\\00\\00" + "\\00\\01"
+ + "C"
Since null bytes are always escaped, this encoding can cause size
blowup for encodings like ``Int64.BigEndianBytes`` that are likely
@@ -513,16 +512,16 @@ class OrderedCodeBytes(proto.Message):
Sorted mode:
- - Fields are encoded in sorted mode.
- - All values supported by the field encodings are allowed
- - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
- if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
- first.
+ - Fields are encoded in sorted mode.
+ - All values supported by the field encodings are allowed
+ - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
+ if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
+ first.
Distinct mode:
- - Fields are encoded in distinct mode.
- - All values supported by the field encodings are allowed.
+ - Fields are encoded in distinct mode.
+ - All values supported by the field encodings are allowed.
"""
diff --git a/google/cloud/bigtable_v2/__init__.py b/google/cloud/bigtable_v2/__init__.py
index 3a5a72c9c..ec552a85d 100644
--- a/google/cloud/bigtable_v2/__init__.py
+++ b/google/cloud/bigtable_v2/__init__.py
@@ -15,8 +15,18 @@
#
from google.cloud.bigtable_v2 import gapic_version as package_version
+import google.api_core as api_core
+import sys
+
__version__ = package_version.__version__
+if sys.version_info >= (3, 8): # pragma: NO COVER
+ from importlib import metadata
+else: # pragma: NO COVER
+ # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove
+ # this code path once we drop support for Python 3.7
+ import importlib_metadata as metadata
+
from .services.bigtable import BigtableClient
from .services.bigtable import BigtableAsyncClient
@@ -70,6 +80,7 @@
from .types.data import Value
from .types.data import ValueRange
from .types.feature_flags import FeatureFlags
+from .types.peer_info import PeerInfo
from .types.request_stats import FullReadStatsView
from .types.request_stats import ReadIterationStats
from .types.request_stats import RequestLatencyStats
@@ -77,6 +88,100 @@
from .types.response_params import ResponseParams
from .types.types import Type
+if hasattr(api_core, "check_python_version") and hasattr(
+ api_core, "check_dependency_versions"
+): # pragma: NO COVER
+ api_core.check_python_version("google.cloud.bigtable_v2") # type: ignore
+ api_core.check_dependency_versions("google.cloud.bigtable_v2") # type: ignore
+else: # pragma: NO COVER
+ # An older version of api_core is installed which does not define the
+ # functions above. We do equivalent checks manually.
+ try:
+ import warnings
+ import sys
+
+ _py_version_str = sys.version.split()[0]
+ _package_label = "google.cloud.bigtable_v2"
+ if sys.version_info < (3, 9):
+ warnings.warn(
+ "You are using a non-supported Python version "
+ + f"({_py_version_str}). Google will not post any further "
+ + f"updates to {_package_label} supporting this Python version. "
+ + "Please upgrade to the latest Python version, or at "
+ + f"least to Python 3.9, and then update {_package_label}.",
+ FutureWarning,
+ )
+ if sys.version_info[:2] == (3, 9):
+ warnings.warn(
+ f"You are using a Python version ({_py_version_str}) "
+ + f"which Google will stop supporting in {_package_label} in "
+ + "January 2026. Please "
+ + "upgrade to the latest Python version, or at "
+ + "least to Python 3.10, before then, and "
+ + f"then update {_package_label}.",
+ FutureWarning,
+ )
+
+ def parse_version_to_tuple(version_string: str):
+ """Safely converts a semantic version string to a comparable tuple of integers.
+ Example: "4.25.8" -> (4, 25, 8)
+ Ignores non-numeric parts and handles common version formats.
+ Args:
+            version_string: Version string in the format "x.y.z"
+ Returns:
+ Tuple of integers for the parsed version string.
+ """
+ parts = []
+ for part in version_string.split("."):
+ try:
+ parts.append(int(part))
+ except ValueError:
+ # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here.
+ # This is a simplification compared to 'packaging.parse_version', but sufficient
+ # for comparing strictly numeric semantic versions.
+ break
+ return tuple(parts)
+
+ def _get_version(dependency_name):
+ try:
+ version_string: str = metadata.version(dependency_name)
+ parsed_version = parse_version_to_tuple(version_string)
+ return (parsed_version, version_string)
+ except Exception:
+ # Catch exceptions from metadata.version() (e.g., PackageNotFoundError)
+ # or errors during parse_version_to_tuple
+ return (None, "--")
+
+ _dependency_package = "google.protobuf"
+ _next_supported_version = "4.25.8"
+ _next_supported_version_tuple = (4, 25, 8)
+ _recommendation = " (we recommend 6.x)"
+ (_version_used, _version_used_string) = _get_version(_dependency_package)
+ if _version_used and _version_used < _next_supported_version_tuple:
+ warnings.warn(
+ f"Package {_package_label} depends on "
+ + f"{_dependency_package}, currently installed at version "
+ + f"{_version_used_string}. Future updates to "
+ + f"{_package_label} will require {_dependency_package} at "
+ + f"version {_next_supported_version} or higher{_recommendation}."
+ + " Please ensure "
+ + "that either (a) your Python environment doesn't pin the "
+ + f"version of {_dependency_package}, so that updates to "
+ + f"{_package_label} can require the higher version, or "
+ + "(b) you manually update your Python environment to use at "
+ + f"least version {_next_supported_version} of "
+ + f"{_dependency_package}.",
+ FutureWarning,
+ )
+ except Exception:
+ warnings.warn(
+ "Could not determine the version of Python "
+ + "currently being used. To continue receiving "
+ + "updates for {_package_label}, ensure you are "
+ + "using a supported version of Python; see "
+ + "https://devguide.python.org/versions/"
+ )
+
__all__ = (
"BigtableAsyncClient",
"ArrayValue",
@@ -101,6 +206,7 @@
"MutateRowsResponse",
"Mutation",
"PartialResultSet",
+ "PeerInfo",
"PingAndWarmRequest",
"PingAndWarmResponse",
"PrepareQueryRequest",
diff --git a/google/cloud/bigtable_v2/gapic_version.py b/google/cloud/bigtable_v2/gapic_version.py
index 4800b0559..6d72a226d 100644
--- a/google/cloud/bigtable_v2/gapic_version.py
+++ b/google/cloud/bigtable_v2/gapic_version.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.34.0" # {x-release-please-version}
+__version__ = "2.35.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py
index 103ff141c..0a9442287 100644
--- a/google/cloud/bigtable_v2/services/bigtable/async_client.py
+++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py
@@ -397,6 +397,13 @@ def read_rows(
if regex_match and regex_match.group("table_name"):
header_params["table_name"] = regex_match.group("table_name")
+ routing_param_regex = re.compile(
+ "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$"
+ )
+ regex_match = routing_param_regex.match(request.materialized_view_name)
+ if regex_match and regex_match.group("name"):
+ header_params["name"] = regex_match.group("name")
+
if header_params:
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(header_params),
@@ -519,6 +526,13 @@ def sample_row_keys(
if regex_match and regex_match.group("table_name"):
header_params["table_name"] = regex_match.group("table_name")
+ routing_param_regex = re.compile(
+ "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$"
+ )
+ regex_match = routing_param_regex.match(request.materialized_view_name)
+ if regex_match and regex_match.group("name"):
+ header_params["name"] = regex_match.group("name")
+
if header_params:
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(header_params),
diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py
index ffc448c25..5eb6ba894 100644
--- a/google/cloud/bigtable_v2/services/bigtable/client.py
+++ b/google/cloud/bigtable_v2/services/bigtable/client.py
@@ -151,6 +151,34 @@ def _get_default_mtls_endpoint(api_endpoint):
_DEFAULT_ENDPOINT_TEMPLATE = "bigtable.{UNIVERSE_DOMAIN}"
_DEFAULT_UNIVERSE = "googleapis.com"
+ @staticmethod
+ def _use_client_cert_effective():
+ """Returns whether client certificate should be used for mTLS if the
+ google-auth version supports should_use_client_cert automatic mTLS enablement.
+
+ Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var.
+
+ Returns:
+ bool: whether client certificate should be used for mTLS
+ Raises:
+ ValueError: (If using a version of google-auth without should_use_client_cert and
+ GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.)
+ """
+ # check if google-auth version supports should_use_client_cert for automatic mTLS enablement
+ if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER
+ return mtls.should_use_client_cert()
+ else: # pragma: NO COVER
+ # if unsupported, fallback to reading from env var
+ use_client_cert_str = os.getenv(
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
+ ).lower()
+ if use_client_cert_str not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
+ " either `true` or `false`"
+ )
+ return use_client_cert_str == "true"
+
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
@@ -401,12 +429,8 @@ def get_mtls_endpoint_and_cert_source(
)
if client_options is None:
client_options = client_options_lib.ClientOptions()
- use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_client_cert = BigtableClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
@@ -414,7 +438,7 @@ def get_mtls_endpoint_and_cert_source(
# Figure out the client cert source to use.
client_cert_source = None
- if use_client_cert == "true":
+ if use_client_cert:
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
@@ -446,20 +470,14 @@ def _read_environment_variables():
google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
is not any of ["auto", "never", "always"].
"""
- use_client_cert = os.getenv(
- "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
- ).lower()
+ use_client_cert = BigtableClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
+ return use_client_cert, use_mtls_endpoint, universe_domain_env
@staticmethod
def _get_client_cert_source(provided_cert_source, use_cert_flag):
@@ -873,6 +891,13 @@ def read_rows(
if regex_match and regex_match.group("table_name"):
header_params["table_name"] = regex_match.group("table_name")
+ routing_param_regex = re.compile(
+ "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$"
+ )
+ regex_match = routing_param_regex.match(request.materialized_view_name)
+ if regex_match and regex_match.group("name"):
+ header_params["name"] = regex_match.group("name")
+
if header_params:
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(header_params),
@@ -992,6 +1017,13 @@ def sample_row_keys(
if regex_match and regex_match.group("table_name"):
header_params["table_name"] = regex_match.group("table_name")
+ routing_param_regex = re.compile(
+ "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$"
+ )
+ regex_match = routing_param_regex.match(request.materialized_view_name)
+ if regex_match and regex_match.group("name"):
+ header_params["name"] = regex_match.group("name")
+
if header_params:
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(header_params),
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/google/cloud/bigtable_v2/services/bigtable/transports/base.py
index 4d25d8b30..f08bca73e 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/base.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/base.py
@@ -74,9 +74,10 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
index 309e72662..8ddbf15a2 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
@@ -152,9 +152,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if a ``channel`` instance is provided.
channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
@@ -287,9 +288,10 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
index 49f981d9a..3e6b70832 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
@@ -149,8 +149,9 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -201,9 +202,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -290,6 +292,7 @@ def __init__(
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
+
if not self._grpc_channel:
# initialize with the provided callable or the default channel
channel_init = channel or type(self).create_channel
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py
index c84ef147f..f0a761a36 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py
@@ -750,9 +750,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if ``channel`` is provided. This argument will be
+ removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
@@ -1080,6 +1081,22 @@ def __call__(
resp, _ = self._interceptor.post_execute_query_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.execute_query",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "ExecuteQuery",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
class _GenerateInitialChangeStreamPartitions(
@@ -1228,6 +1245,22 @@ def __call__(
) = self._interceptor.post_generate_initial_change_stream_partitions_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.generate_initial_change_stream_partitions",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "GenerateInitialChangeStreamPartitions",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
class _MutateRow(_BaseBigtableRestTransport._BaseMutateRow, BigtableRestStub):
@@ -1515,6 +1548,22 @@ def __call__(
resp, _ = self._interceptor.post_mutate_rows_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.mutate_rows",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "MutateRows",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
class _PingAndWarm(_BaseBigtableRestTransport._BasePingAndWarm, BigtableRestStub):
@@ -1966,6 +2015,22 @@ def __call__(
resp, _ = self._interceptor.post_read_change_stream_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.read_change_stream",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "ReadChangeStream",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
class _ReadModifyWriteRow(
@@ -2253,6 +2318,22 @@ def __call__(
resp, _ = self._interceptor.post_read_rows_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.read_rows",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "ReadRows",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
class _SampleRowKeys(
@@ -2383,6 +2464,22 @@ def __call__(
resp, _ = self._interceptor.post_sample_row_keys_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.sample_row_keys",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "SampleRowKeys",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
@property
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py
index b2080f4a4..5eab0ded4 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py
@@ -641,6 +641,11 @@ def _get_http_options():
"uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows",
"body": "*",
},
+ {
+ "method": "post",
+ "uri": "/v2/{materialized_view_name=projects/*/instances/*/materializedViews/*}:readRows",
+ "body": "*",
+ },
]
return http_options
@@ -686,6 +691,10 @@ def _get_http_options():
"method": "get",
"uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys",
},
+ {
+ "method": "get",
+ "uri": "/v2/{materialized_view_name=projects/*/instances/*/materializedViews/*}:sampleRowKeys",
+ },
]
return http_options
diff --git a/google/cloud/bigtable_v2/types/__init__.py b/google/cloud/bigtable_v2/types/__init__.py
index bd3c36154..b13c076a2 100644
--- a/google/cloud/bigtable_v2/types/__init__.py
+++ b/google/cloud/bigtable_v2/types/__init__.py
@@ -68,6 +68,9 @@
from .feature_flags import (
FeatureFlags,
)
+from .peer_info import (
+ PeerInfo,
+)
from .request_stats import (
FullReadStatsView,
ReadIterationStats,
@@ -131,6 +134,7 @@
"Value",
"ValueRange",
"FeatureFlags",
+ "PeerInfo",
"FullReadStatsView",
"ReadIterationStats",
"RequestLatencyStats",
diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py
index 0e7ac1df3..19abba67b 100644
--- a/google/cloud/bigtable_v2/types/bigtable.py
+++ b/google/cloud/bigtable_v2/types/bigtable.py
@@ -1330,10 +1330,10 @@ class ExecuteQueryRequest(proto.Message):
Setting this field also places restrictions on several other
fields:
- - ``data_format`` must be empty.
- - ``validate_only`` must be false.
- - ``params`` must match the ``param_types`` set in the
- ``PrepareQueryRequest``.
+ - ``data_format`` must be empty.
+ - ``validate_only`` must be false.
+ - ``params`` must match the ``param_types`` set in the
+ ``PrepareQueryRequest``.
proto_format (google.cloud.bigtable_v2.types.ProtoFormat):
Protocol buffer format as described by
ProtoSchema and ProtoRows messages.
diff --git a/google/cloud/bigtable_v2/types/data.py b/google/cloud/bigtable_v2/types/data.py
index ad7e382f7..12ac8b2b1 100644
--- a/google/cloud/bigtable_v2/types/data.py
+++ b/google/cloud/bigtable_v2/types/data.py
@@ -573,26 +573,26 @@ class RowFilter(proto.Message):
transformers), as well as two ways to compose simple filters into
more complex ones (chains and interleaves). They work as follows:
- - True filters alter the input row by excluding some of its cells
- wholesale from the output row. An example of a true filter is the
- ``value_regex_filter``, which excludes cells whose values don't
- match the specified pattern. All regex true filters use RE2
- syntax (https://github.com/google/re2/wiki/Syntax) in raw byte
- mode (RE2::Latin1), and are evaluated as full matches. An
- important point to keep in mind is that ``RE2(.)`` is equivalent
- by default to ``RE2([^\n])``, meaning that it does not match
- newlines. When attempting to match an arbitrary byte, you should
- therefore use the escape sequence ``\C``, which may need to be
- further escaped as ``\\C`` in your client language.
-
- - Transformers alter the input row by changing the values of some
- of its cells in the output, without excluding them completely.
- Currently, the only supported transformer is the
- ``strip_value_transformer``, which replaces every cell's value
- with the empty string.
-
- - Chains and interleaves are described in more detail in the
- RowFilter.Chain and RowFilter.Interleave documentation.
+ - True filters alter the input row by excluding some of its cells
+ wholesale from the output row. An example of a true filter is the
+ ``value_regex_filter``, which excludes cells whose values don't
+ match the specified pattern. All regex true filters use RE2 syntax
+ (https://github.com/google/re2/wiki/Syntax) in raw byte mode
+ (RE2::Latin1), and are evaluated as full matches. An important
+ point to keep in mind is that ``RE2(.)`` is equivalent by default
+ to ``RE2([^\n])``, meaning that it does not match newlines. When
+ attempting to match an arbitrary byte, you should therefore use
+ the escape sequence ``\C``, which may need to be further escaped
+ as ``\\C`` in your client language.
+
+ - Transformers alter the input row by changing the values of some of
+ its cells in the output, without excluding them completely.
+ Currently, the only supported transformer is the
+ ``strip_value_transformer``, which replaces every cell's value
+ with the empty string.
+
+ - Chains and interleaves are described in more detail in the
+ RowFilter.Chain and RowFilter.Interleave documentation.
The total serialized size of a RowFilter message must not exceed
20480 bytes, and RowFilters may not be nested within each other (in
@@ -1493,21 +1493,20 @@ class PartialResultSet(proto.Message):
Having:
- - queue of row results waiting to be returned ``queue``
- - extensible buffer of bytes ``buffer``
- - a place to keep track of the most recent ``resume_token`` for
- each PartialResultSet ``p`` received { if p.reset { ensure
- ``queue`` is empty ensure ``buffer`` is empty } if
- p.estimated_batch_size != 0 { (optional) ensure ``buffer`` is
- sized to at least ``p.estimated_batch_size`` } if
- ``p.proto_rows_batch`` is set { append
- ``p.proto_rows_batch.bytes`` to ``buffer`` } if p.batch_checksum
- is set and ``buffer`` is not empty { validate the checksum
- matches the contents of ``buffer`` (see comments on
- ``batch_checksum``) parse ``buffer`` as ``ProtoRows`` message,
- clearing ``buffer`` add parsed rows to end of ``queue`` } if
- p.resume_token is set { release results in ``queue`` save
- ``p.resume_token`` in ``resume_token`` } }
+ - queue of row results waiting to be returned ``queue``
+ - extensible buffer of bytes ``buffer``
+ - a place to keep track of the most recent ``resume_token`` for each
+ PartialResultSet ``p`` received { if p.reset { ensure ``queue`` is
+ empty ensure ``buffer`` is empty } if p.estimated_batch_size != 0
+ { (optional) ensure ``buffer`` is sized to at least
+ ``p.estimated_batch_size`` } if ``p.proto_rows_batch`` is set {
+ append ``p.proto_rows_batch.bytes`` to ``buffer`` } if
+ p.batch_checksum is set and ``buffer`` is not empty { validate the
+ checksum matches the contents of ``buffer`` (see comments on
+ ``batch_checksum``) parse ``buffer`` as ``ProtoRows`` message,
+ clearing ``buffer`` add parsed rows to end of ``queue`` } if
+ p.resume_token is set { release results in ``queue`` save
+ ``p.resume_token`` in ``resume_token`` } }
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
diff --git a/google/cloud/bigtable_v2/types/feature_flags.py b/google/cloud/bigtable_v2/types/feature_flags.py
index 69cfe1cf4..2c8ea8732 100644
--- a/google/cloud/bigtable_v2/types/feature_flags.py
+++ b/google/cloud/bigtable_v2/types/feature_flags.py
@@ -76,6 +76,9 @@ class FeatureFlags(proto.Message):
direct_access_requested (bool):
Notify the server that the client explicitly
opted in for Direct Access.
+ peer_info (bool):
+ If the client can support using
+ BigtablePeerInfo.
"""
reverse_scans: bool = proto.Field(
@@ -114,6 +117,10 @@ class FeatureFlags(proto.Message):
proto.BOOL,
number=10,
)
+ peer_info: bool = proto.Field(
+ proto.BOOL,
+ number=11,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/bigtable_v2/types/peer_info.py b/google/cloud/bigtable_v2/types/peer_info.py
new file mode 100644
index 000000000..b3f1203cc
--- /dev/null
+++ b/google/cloud/bigtable_v2/types/peer_info.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.bigtable.v2",
+ manifest={
+ "PeerInfo",
+ },
+)
+
+
+class PeerInfo(proto.Message):
+ r"""PeerInfo contains information about the peer that the client
+ is connecting to.
+
+ Attributes:
+ google_frontend_id (int):
+ An opaque identifier for the Google Frontend
+ which serviced this request. Only set when not
+ using DirectAccess.
+ application_frontend_id (int):
+ An opaque identifier for the application
+ frontend which serviced this request.
+ application_frontend_zone (str):
+ The Cloud zone of the application frontend
+ that served this request.
+ application_frontend_subzone (str):
+ The subzone of the application frontend that
+ served this request, e.g. an identifier for
+ where within the zone the application frontend
+ is.
+ transport_type (google.cloud.bigtable_v2.types.PeerInfo.TransportType):
+
+ """
+
+ class TransportType(proto.Enum):
+ r"""The transport type that the client used to connect to this
+ peer.
+
+ Values:
+ TRANSPORT_TYPE_UNKNOWN (0):
+ The transport type is unknown.
+ TRANSPORT_TYPE_EXTERNAL (1):
+ The client connected to this peer via an
+            external network (e.g. outside Google Cloud).
+ TRANSPORT_TYPE_CLOUD_PATH (2):
+ The client connected to this peer via
+ CloudPath.
+ TRANSPORT_TYPE_DIRECT_ACCESS (3):
+ The client connected to this peer via
+ DirectAccess.
+ TRANSPORT_TYPE_SESSION_UNKNOWN (4):
+ The client connected to this peer via
+ Bigtable Sessions using an unknown transport
+ type.
+ TRANSPORT_TYPE_SESSION_EXTERNAL (5):
+ The client connected to this peer via
+ Bigtable Sessions on an external network (e.g.
+ outside Google Cloud).
+ TRANSPORT_TYPE_SESSION_CLOUD_PATH (6):
+ The client connected to this peer via
+ Bigtable Sessions using CloudPath.
+ TRANSPORT_TYPE_SESSION_DIRECT_ACCESS (7):
+ The client connected to this peer via
+ Bigtable Sessions using DirectAccess.
+ """
+ TRANSPORT_TYPE_UNKNOWN = 0
+ TRANSPORT_TYPE_EXTERNAL = 1
+ TRANSPORT_TYPE_CLOUD_PATH = 2
+ TRANSPORT_TYPE_DIRECT_ACCESS = 3
+ TRANSPORT_TYPE_SESSION_UNKNOWN = 4
+ TRANSPORT_TYPE_SESSION_EXTERNAL = 5
+ TRANSPORT_TYPE_SESSION_CLOUD_PATH = 6
+ TRANSPORT_TYPE_SESSION_DIRECT_ACCESS = 7
+
+ google_frontend_id: int = proto.Field(
+ proto.INT64,
+ number=1,
+ )
+ application_frontend_id: int = proto.Field(
+ proto.INT64,
+ number=2,
+ )
+ application_frontend_zone: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ application_frontend_subzone: str = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+ transport_type: TransportType = proto.Field(
+ proto.ENUM,
+ number=5,
+ enum=TransportType,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/bigtable_v2/types/response_params.py b/google/cloud/bigtable_v2/types/response_params.py
index fb373d055..cc6384ab3 100644
--- a/google/cloud/bigtable_v2/types/response_params.py
+++ b/google/cloud/bigtable_v2/types/response_params.py
@@ -44,6 +44,11 @@ class ResponseParams(proto.Message):
of bigtable resources.
This field is a member of `oneof`_ ``_cluster_id``.
+ afe_id (int):
+ The AFE ID for the AFE that served this
+ request.
+
+ This field is a member of `oneof`_ ``_afe_id``.
"""
zone_id: str = proto.Field(
@@ -56,6 +61,11 @@ class ResponseParams(proto.Message):
number=2,
optional=True,
)
+ afe_id: int = proto.Field(
+ proto.INT64,
+ number=3,
+ optional=True,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/bigtable_v2/types/types.py b/google/cloud/bigtable_v2/types/types.py
index 5eae9e526..0b4ddb57a 100644
--- a/google/cloud/bigtable_v2/types/types.py
+++ b/google/cloud/bigtable_v2/types/types.py
@@ -35,34 +35,27 @@ class Type(proto.Message):
features.
For compatibility with Bigtable's existing untyped APIs, each
- ``Type`` includes an ``Encoding`` which describes how to convert
- to/from the underlying data.
-
- Each encoding also defines the following properties:
-
- - Order-preserving: Does the encoded value sort consistently with
- the original typed value? Note that Bigtable will always sort
- data based on the raw encoded value, *not* the decoded type.
-
- - Example: BYTES values sort in the same order as their raw
- encodings.
- - Counterexample: Encoding INT64 as a fixed-width decimal string
- does *not* preserve sort order when dealing with negative
- numbers. ``INT64(1) > INT64(-1)``, but
- ``STRING("-00001") > STRING("00001)``.
-
- - Self-delimiting: If we concatenate two encoded values, can we
- always tell where the first one ends and the second one begins?
-
- - Example: If we encode INT64s to fixed-width STRINGs, the first
- value will always contain exactly N digits, possibly preceded
- by a sign.
- - Counterexample: If we concatenate two UTF-8 encoded STRINGs,
- we have no way to tell where the first one ends.
-
- - Compatibility: Which other systems have matching encoding
- schemes? For example, does this encoding have a GoogleSQL
- equivalent? HBase? Java?
+ ``Type`` includes an ``Encoding`` which describes how to convert to
+ or from the underlying data.
+
+ Each encoding can operate in one of two modes:
+
+ - Sorted: In this mode, Bigtable guarantees that
+ ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is
+ useful anywhere sort order is important, for example when encoding
+ keys.
+ - Distinct: In this mode, Bigtable guarantees that if ``X != Y``
+ then ``Encode(X) != Encode(Y)``. However, the converse is not
+ guaranteed. For example, both ``{'foo': '1', 'bar': '2'}`` and
+ ``{'bar': '2', 'foo': '1'}`` are valid encodings of the same JSON
+ value.
+
+ The API clearly documents which mode is used wherever an encoding
+ can be configured. Each encoding also documents which values are
+ supported in which modes. For example, when encoding INT64 as a
+ numeric STRING, negative numbers cannot be encoded in sorted mode.
+ This is because ``INT64(1) > INT64(-1)``, but
+ ``STRING("-00001") > STRING("00001")``.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
@@ -135,12 +128,12 @@ class Bytes(proto.Message):
Attributes:
encoding (google.cloud.bigtable_v2.types.Type.Bytes.Encoding):
- The encoding to use when converting to/from
- lower level types.
+ The encoding to use when converting to or
+ from lower level types.
"""
class Encoding(proto.Message):
- r"""Rules used to convert to/from lower level types.
+ r"""Rules used to convert to or from lower level types.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
@@ -152,14 +145,26 @@ class Encoding(proto.Message):
"""
class Raw(proto.Message):
- r"""Leaves the value "as-is"
+ r"""Leaves the value as-is.
+
+ Sorted mode: all values are supported.
+
+ Distinct mode: all values are supported.
- - Order-preserving? Yes
- - Self-delimiting? No
- - Compatibility? N/A
+ Attributes:
+ escape_nulls (bool):
+ If set, allows NULL values to be encoded as the empty string
+ "".
+ The actual empty string, or any value which only contains
+ the null byte ``0x00``, has one more null byte appended.
"""
+ escape_nulls: bool = proto.Field(
+ proto.BOOL,
+ number=1,
+ )
+
raw: "Type.Bytes.Encoding.Raw" = proto.Field(
proto.MESSAGE,
number=1,
@@ -179,12 +184,12 @@ class String(proto.Message):
Attributes:
encoding (google.cloud.bigtable_v2.types.Type.String.Encoding):
- The encoding to use when converting to/from
- lower level types.
+ The encoding to use when converting to or
+ from lower level types.
"""
class Encoding(proto.Message):
- r"""Rules used to convert to/from lower level types.
+ r"""Rules used to convert to or from lower level types.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
@@ -208,18 +213,45 @@ class Utf8Raw(proto.Message):
r"""Deprecated: prefer the equivalent ``Utf8Bytes``."""
class Utf8Bytes(proto.Message):
- r"""UTF-8 encoding
+ r"""UTF-8 encoding.
- - Order-preserving? Yes (code point order)
- - Self-delimiting? No
- - Compatibility?
+ Sorted mode:
- - BigQuery Federation ``TEXT`` encoding
- - HBase ``Bytes.toBytes``
- - Java ``String#getBytes(StandardCharsets.UTF_8)``
+ - All values are supported.
+ - Code point order is preserved.
+ Distinct mode: all values are supported.
+
+ Compatible with:
+
+ - BigQuery ``TEXT`` encoding
+ - HBase ``Bytes.toBytes``
+ - Java ``String#getBytes(StandardCharsets.UTF_8)``
+
+ Attributes:
+ null_escape_char (str):
+ Single-character escape sequence used to support NULL
+ values.
+
+ If set, allows NULL values to be encoded as the empty string
+ "".
+
+ The actual empty string, or any value where every character
+ equals ``null_escape_char``, has one more
+ ``null_escape_char`` appended.
+
+ If ``null_escape_char`` is set and does not equal the ASCII
+ null character ``0x00``, then the encoding will not support
+ sorted mode.
+
+
"""
+ null_escape_char: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
utf8_raw: "Type.String.Encoding.Utf8Raw" = proto.Field(
proto.MESSAGE,
number=1,
@@ -244,12 +276,17 @@ class Int64(proto.Message):
Attributes:
encoding (google.cloud.bigtable_v2.types.Type.Int64.Encoding):
- The encoding to use when converting to/from
- lower level types.
+ The encoding to use when converting to or
+ from lower level types.
"""
class Encoding(proto.Message):
- r"""Rules used to convert to/from lower level types.
+ r"""Rules used to convert to or from lower level types.
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
@@ -257,20 +294,25 @@ class Encoding(proto.Message):
big_endian_bytes (google.cloud.bigtable_v2.types.Type.Int64.Encoding.BigEndianBytes):
Use ``BigEndianBytes`` encoding.
+ This field is a member of `oneof`_ ``encoding``.
+ ordered_code_bytes (google.cloud.bigtable_v2.types.Type.Int64.Encoding.OrderedCodeBytes):
+ Use ``OrderedCodeBytes`` encoding.
+
This field is a member of `oneof`_ ``encoding``.
"""
class BigEndianBytes(proto.Message):
- r"""Encodes the value as an 8-byte big endian twos complement ``Bytes``
- value.
+ r"""Encodes the value as an 8-byte big-endian two's complement value.
+
+ Sorted mode: non-negative values are supported.
- - Order-preserving? No (positive values only)
- - Self-delimiting? Yes
- - Compatibility?
+ Distinct mode: all values are supported.
- - BigQuery Federation ``BINARY`` encoding
- - HBase ``Bytes.toBytes``
- - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN``
+ Compatible with:
+
+ - BigQuery ``BINARY`` encoding
+ - HBase ``Bytes.toBytes``
+ - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN``
Attributes:
bytes_type (google.cloud.bigtable_v2.types.Type.Bytes):
@@ -283,12 +325,28 @@ class BigEndianBytes(proto.Message):
message="Type.Bytes",
)
+ class OrderedCodeBytes(proto.Message):
+ r"""Encodes the value in a variable length binary format of up to
+ 10 bytes. Values that are closer to zero use fewer bytes.
+
+ Sorted mode: all values are supported.
+
+ Distinct mode: all values are supported.
+
+ """
+
big_endian_bytes: "Type.Int64.Encoding.BigEndianBytes" = proto.Field(
proto.MESSAGE,
number=1,
oneof="encoding",
message="Type.Int64.Encoding.BigEndianBytes",
)
+ ordered_code_bytes: "Type.Int64.Encoding.OrderedCodeBytes" = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="encoding",
+ message="Type.Int64.Encoding.OrderedCodeBytes",
+ )
encoding: "Type.Int64.Encoding" = proto.Field(
proto.MESSAGE,
@@ -315,8 +373,43 @@ class Timestamp(proto.Message):
r"""Timestamp Values of type ``Timestamp`` are stored in
``Value.timestamp_value``.
+ Attributes:
+ encoding (google.cloud.bigtable_v2.types.Type.Timestamp.Encoding):
+ The encoding to use when converting to or
+ from lower level types.
"""
+ class Encoding(proto.Message):
+ r"""Rules used to convert to or from lower level types.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ unix_micros_int64 (google.cloud.bigtable_v2.types.Type.Int64.Encoding):
+ Encodes the number of microseconds since the Unix epoch
+ using the given ``Int64`` encoding. Values must be
+ microsecond-aligned.
+
+ Compatible with:
+
+ - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS``
+
+ This field is a member of `oneof`_ ``encoding``.
+ """
+
+ unix_micros_int64: "Type.Int64.Encoding" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof="encoding",
+ message="Type.Int64.Encoding",
+ )
+
+ encoding: "Type.Timestamp.Encoding" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="Type.Timestamp.Encoding",
+ )
+
class Date(proto.Message):
r"""Date Values of type ``Date`` are stored in ``Value.date_value``."""
@@ -330,6 +423,9 @@ class Struct(proto.Message):
fields (MutableSequence[google.cloud.bigtable_v2.types.Type.Struct.Field]):
The names and types of the fields in this
struct.
+ encoding (google.cloud.bigtable_v2.types.Type.Struct.Encoding):
+ The encoding to use when converting to or
+ from lower level types.
"""
class Field(proto.Message):
@@ -353,11 +449,146 @@ class Field(proto.Message):
message="Type",
)
+ class Encoding(proto.Message):
+ r"""Rules used to convert to or from lower level types.
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ singleton (google.cloud.bigtable_v2.types.Type.Struct.Encoding.Singleton):
+ Use ``Singleton`` encoding.
+
+ This field is a member of `oneof`_ ``encoding``.
+ delimited_bytes (google.cloud.bigtable_v2.types.Type.Struct.Encoding.DelimitedBytes):
+ Use ``DelimitedBytes`` encoding.
+
+ This field is a member of `oneof`_ ``encoding``.
+ ordered_code_bytes (google.cloud.bigtable_v2.types.Type.Struct.Encoding.OrderedCodeBytes):
+ Use ``OrderedCodeBytes`` encoding.
+
+ This field is a member of `oneof`_ ``encoding``.
+ """
+
+ class Singleton(proto.Message):
+ r"""Uses the encoding of ``fields[0].type`` as-is. Only valid if
+ ``fields.size == 1``.
+
+ """
+
+ class DelimitedBytes(proto.Message):
+ r"""Fields are encoded independently and concatenated with a
+ configurable ``delimiter`` in between.
+
+ A struct with no fields defined is encoded as a single
+ ``delimiter``.
+
+ Sorted mode:
+
+ - Fields are encoded in sorted mode.
+ - Encoded field values must not contain any bytes <=
+ ``delimiter[0]``.
+ - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
+ if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
+ first.
+
+ Distinct mode:
+
+ - Fields are encoded in distinct mode.
+ - Encoded field values must not contain ``delimiter[0]``.
+
+ Attributes:
+ delimiter (bytes):
+ Byte sequence used to delimit concatenated
+ fields. The delimiter must contain at least 1
+ character and at most 50 characters.
+ """
+
+ delimiter: bytes = proto.Field(
+ proto.BYTES,
+ number=1,
+ )
+
+ class OrderedCodeBytes(proto.Message):
+ r"""Fields are encoded independently and concatenated with the fixed
+ byte pair ``{0x00, 0x01}`` in between.
+
+ Any null ``(0x00)`` byte in an encoded field is replaced by the
+ fixed byte pair ``{0x00, 0xFF}``.
+
+ Fields that encode to the empty string "" have special handling:
+
+ - If *every* field encodes to "", or if the STRUCT has no fields
+ defined, then the STRUCT is encoded as the fixed byte pair
+ ``{0x00, 0x00}``.
+ - Otherwise, the STRUCT only encodes until the last non-empty field,
+ omitting any trailing empty fields. Any empty fields that aren't
+ omitted are replaced with the fixed byte pair ``{0x00, 0x00}``.
+
+ Examples:
+
+ ::
+
+ - STRUCT() -> "\00\00"
+ - STRUCT("") -> "\00\00"
+ - STRUCT("", "") -> "\00\00"
+ - STRUCT("", "B") -> "\00\00" + "\00\01" + "B"
+ - STRUCT("A", "") -> "A"
+ - STRUCT("", "B", "") -> "\00\00" + "\00\01" + "B"
+ - STRUCT("A", "", "C") -> "A" + "\00\01" + "\00\00" + "\00\01" + "C"
+
+ Since null bytes are always escaped, this encoding can cause size
+ blowup for encodings like ``Int64.BigEndianBytes`` that are likely
+ to produce many such bytes.
+
+ Sorted mode:
+
+ - Fields are encoded in sorted mode.
+ - All values supported by the field encodings are allowed.
+ - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
+ if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
+ first.
+
+ Distinct mode:
+
+ - Fields are encoded in distinct mode.
+ - All values supported by the field encodings are allowed.
+
+ """
+
+ singleton: "Type.Struct.Encoding.Singleton" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof="encoding",
+ message="Type.Struct.Encoding.Singleton",
+ )
+ delimited_bytes: "Type.Struct.Encoding.DelimitedBytes" = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="encoding",
+ message="Type.Struct.Encoding.DelimitedBytes",
+ )
+ ordered_code_bytes: "Type.Struct.Encoding.OrderedCodeBytes" = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="encoding",
+ message="Type.Struct.Encoding.OrderedCodeBytes",
+ )
+
fields: MutableSequence["Type.Struct.Field"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="Type.Struct.Field",
)
+ encoding: "Type.Struct.Encoding" = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message="Type.Struct.Encoding",
+ )
class Proto(proto.Message):
r"""A protobuf message type. Values of type ``Proto`` are stored in
@@ -453,8 +684,8 @@ class Aggregate(proto.Message):
r"""A value that combines incremental updates into a summarized value.
Data is never directly written or read using type ``Aggregate``.
- Writes will provide either the ``input_type`` or ``state_type``, and
- reads will always return the ``state_type`` .
+ Writes provide either the ``input_type`` or ``state_type``, and
+ reads always return the ``state_type``.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
@@ -466,13 +697,12 @@ class Aggregate(proto.Message):
Attributes:
input_type (google.cloud.bigtable_v2.types.Type):
Type of the inputs that are accumulated by this
- ``Aggregate``, which must specify a full encoding. Use
- ``AddInput`` mutations to accumulate new inputs.
+ ``Aggregate``. Use ``AddInput`` mutations to accumulate new
+ inputs.
state_type (google.cloud.bigtable_v2.types.Type):
Output only. Type that holds the internal accumulator state
for the ``Aggregate``. This is a function of the
- ``input_type`` and ``aggregator`` chosen, and will always
- specify a full encoding.
+ ``input_type`` and ``aggregator`` chosen.
sum (google.cloud.bigtable_v2.types.Type.Aggregate.Sum):
Sum aggregator.
diff --git a/noxfile.py b/noxfile.py
index a182bafba..29de5901b 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -14,6 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# DO NOT EDIT THIS FILE OUTSIDE OF `.librarian/generator-input`
+# The source of truth for this file is `.librarian/generator-input`
+
+
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
@@ -30,7 +34,7 @@
FLAKE8_VERSION = "flake8==6.1.0"
BLACK_VERSION = "black[jupyter]==23.3.0"
ISORT_VERSION = "isort==5.11.0"
-LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
+LINT_PATHS = ["google", "tests", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.13"
diff --git a/release-please-config.json b/release-please-config.json
deleted file mode 100644
index 33d5a7e21..000000000
--- a/release-please-config.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "$schema":
-"https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
- "packages": {
- ".": {
- "release-type": "python",
- "extra-files": [
- "google/cloud/bigtable/gapic_version.py",
- "google/cloud/bigtable_admin/gapic_version.py",
- "google/cloud/bigtable_v2/gapic_version.py",
- "google/cloud/bigtable_admin_v2/gapic_version.py"
- ]
- }
- },
- "release-type": "python",
- "plugins": [
- {
- "type": "sentence-case"
- }
- ],
- "initial-version": "2.13.2"
-}
diff --git a/releases.md b/releases.md
deleted file mode 120000
index 4c43d4932..000000000
--- a/releases.md
+++ /dev/null
@@ -1 +0,0 @@
-../../bigtable/CHANGELOG.md
\ No newline at end of file
diff --git a/samples/beam/requirements.txt b/samples/beam/requirements.txt
index 55b3ae719..bb207ddf4 100644
--- a/samples/beam/requirements.txt
+++ b/samples/beam/requirements.txt
@@ -1,3 +1,4 @@
-apache-beam==2.65.0
-google-cloud-bigtable==2.30.1
-google-cloud-core==2.4.3
+apache-beam===2.60.0; python_version == '3.8'
+apache-beam==2.69.0; python_version >= '3.9'
+google-cloud-bigtable==2.34.0
+google-cloud-core==2.5.0
diff --git a/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json b/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json
index 3d73099e8..42db3b70b 100644
--- a/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json
+++ b/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json
@@ -8,7 +8,7 @@
],
"language": "PYTHON",
"name": "google-cloud-bigtable-admin",
- "version": "0.1.0"
+ "version": "2.35.0"
},
"snippets": [
{
diff --git a/samples/hello/requirements.txt b/samples/hello/requirements.txt
index 55d3a1ddd..ab4d1fc82 100644
--- a/samples/hello/requirements.txt
+++ b/samples/hello/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.30.1
-google-cloud-core==2.4.3
+google-cloud-bigtable==2.34.0
+google-cloud-core==2.5.0
diff --git a/samples/instanceadmin/requirements.txt b/samples/instanceadmin/requirements.txt
index a2922fe6e..30d3bc28f 100644
--- a/samples/instanceadmin/requirements.txt
+++ b/samples/instanceadmin/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.34.0
backoff==2.2.1
diff --git a/samples/metricscaler/requirements.txt b/samples/metricscaler/requirements.txt
index 522c28ae2..09af5060d 100644
--- a/samples/metricscaler/requirements.txt
+++ b/samples/metricscaler/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.30.1
-google-cloud-monitoring==2.27.1
+google-cloud-bigtable==2.34.0
+google-cloud-monitoring==2.28.0
diff --git a/samples/quickstart/requirements.txt b/samples/quickstart/requirements.txt
index 807132c7e..aea551f27 100644
--- a/samples/quickstart/requirements.txt
+++ b/samples/quickstart/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.34.0
diff --git a/samples/snippets/data_client/requirements.txt b/samples/snippets/data_client/requirements.txt
index 807132c7e..aea551f27 100644
--- a/samples/snippets/data_client/requirements.txt
+++ b/samples/snippets/data_client/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.34.0
diff --git a/samples/snippets/deletes/requirements.txt b/samples/snippets/deletes/requirements.txt
index 807132c7e..aea551f27 100644
--- a/samples/snippets/deletes/requirements.txt
+++ b/samples/snippets/deletes/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.34.0
diff --git a/samples/snippets/filters/requirements.txt b/samples/snippets/filters/requirements.txt
index 807132c7e..aea551f27 100644
--- a/samples/snippets/filters/requirements.txt
+++ b/samples/snippets/filters/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.34.0
diff --git a/samples/snippets/reads/requirements.txt b/samples/snippets/reads/requirements.txt
index 807132c7e..aea551f27 100644
--- a/samples/snippets/reads/requirements.txt
+++ b/samples/snippets/reads/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.34.0
diff --git a/samples/snippets/writes/requirements.txt b/samples/snippets/writes/requirements.txt
index 874788bf7..1ac867641 100644
--- a/samples/snippets/writes/requirements.txt
+++ b/samples/snippets/writes/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
\ No newline at end of file
+google-cloud-bigtable==2.34.0
\ No newline at end of file
diff --git a/samples/tableadmin/requirements-test.txt b/samples/tableadmin/requirements-test.txt
index d8889022d..f01fd134c 100644
--- a/samples/tableadmin/requirements-test.txt
+++ b/samples/tableadmin/requirements-test.txt
@@ -1,2 +1,2 @@
pytest
-google-cloud-testutils==1.6.4
+google-cloud-testutils==1.7.0
diff --git a/samples/tableadmin/requirements.txt b/samples/tableadmin/requirements.txt
index 807132c7e..aea551f27 100644
--- a/samples/tableadmin/requirements.txt
+++ b/samples/tableadmin/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.34.0
diff --git a/scripts/fixup_bigtable_admin_v2_keywords.py b/scripts/fixup_bigtable_admin_v2_keywords.py
deleted file mode 100644
index 1fda05668..000000000
--- a/scripts/fixup_bigtable_admin_v2_keywords.py
+++ /dev/null
@@ -1,238 +0,0 @@
-#! /usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import argparse
-import os
-import libcst as cst
-import pathlib
-import sys
-from typing import (Any, Callable, Dict, List, Sequence, Tuple)
-
-
-def partition(
- predicate: Callable[[Any], bool],
- iterator: Sequence[Any]
-) -> Tuple[List[Any], List[Any]]:
- """A stable, out-of-place partition."""
- results = ([], [])
-
- for i in iterator:
- results[int(predicate(i))].append(i)
-
- # Returns trueList, falseList
- return results[1], results[0]
-
-
-class bigtable_adminCallTransformer(cst.CSTTransformer):
- CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
- METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
- 'check_consistency': ('name', 'consistency_token', 'standard_read_remote_writes', 'data_boost_read_local_writes', ),
- 'copy_backup': ('parent', 'backup_id', 'source_backup', 'expire_time', ),
- 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ),
- 'create_authorized_view': ('parent', 'authorized_view_id', 'authorized_view', ),
- 'create_backup': ('parent', 'backup_id', 'backup', ),
- 'create_cluster': ('parent', 'cluster_id', 'cluster', ),
- 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ),
- 'create_logical_view': ('parent', 'logical_view_id', 'logical_view', ),
- 'create_materialized_view': ('parent', 'materialized_view_id', 'materialized_view', ),
- 'create_schema_bundle': ('parent', 'schema_bundle_id', 'schema_bundle', ),
- 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ),
- 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ),
- 'delete_app_profile': ('name', 'ignore_warnings', ),
- 'delete_authorized_view': ('name', 'etag', ),
- 'delete_backup': ('name', ),
- 'delete_cluster': ('name', ),
- 'delete_instance': ('name', ),
- 'delete_logical_view': ('name', 'etag', ),
- 'delete_materialized_view': ('name', 'etag', ),
- 'delete_schema_bundle': ('name', 'etag', ),
- 'delete_snapshot': ('name', ),
- 'delete_table': ('name', ),
- 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ),
- 'generate_consistency_token': ('name', ),
- 'get_app_profile': ('name', ),
- 'get_authorized_view': ('name', 'view', ),
- 'get_backup': ('name', ),
- 'get_cluster': ('name', ),
- 'get_iam_policy': ('resource', 'options', ),
- 'get_instance': ('name', ),
- 'get_logical_view': ('name', ),
- 'get_materialized_view': ('name', ),
- 'get_schema_bundle': ('name', ),
- 'get_snapshot': ('name', ),
- 'get_table': ('name', 'view', ),
- 'list_app_profiles': ('parent', 'page_size', 'page_token', ),
- 'list_authorized_views': ('parent', 'page_size', 'page_token', 'view', ),
- 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ),
- 'list_clusters': ('parent', 'page_token', ),
- 'list_hot_tablets': ('parent', 'start_time', 'end_time', 'page_size', 'page_token', ),
- 'list_instances': ('parent', 'page_token', ),
- 'list_logical_views': ('parent', 'page_size', 'page_token', ),
- 'list_materialized_views': ('parent', 'page_size', 'page_token', ),
- 'list_schema_bundles': ('parent', 'page_size', 'page_token', ),
- 'list_snapshots': ('parent', 'page_size', 'page_token', ),
- 'list_tables': ('parent', 'view', 'page_size', 'page_token', ),
- 'modify_column_families': ('name', 'modifications', 'ignore_warnings', ),
- 'partial_update_cluster': ('cluster', 'update_mask', ),
- 'partial_update_instance': ('instance', 'update_mask', ),
- 'restore_table': ('parent', 'table_id', 'backup', ),
- 'set_iam_policy': ('resource', 'policy', 'update_mask', ),
- 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ),
- 'test_iam_permissions': ('resource', 'permissions', ),
- 'undelete_table': ('name', ),
- 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ),
- 'update_authorized_view': ('authorized_view', 'update_mask', 'ignore_warnings', ),
- 'update_backup': ('backup', 'update_mask', ),
- 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'node_scaling_factor', 'cluster_config', 'default_storage_type', 'encryption_config', ),
- 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', 'satisfies_pzi', 'tags', ),
- 'update_logical_view': ('logical_view', 'update_mask', ),
- 'update_materialized_view': ('materialized_view', 'update_mask', ),
- 'update_schema_bundle': ('schema_bundle', 'update_mask', 'ignore_warnings', ),
- 'update_table': ('table', 'update_mask', 'ignore_warnings', ),
- }
-
- def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
- try:
- key = original.func.attr.value
- kword_params = self.METHOD_TO_PARAMS[key]
- except (AttributeError, KeyError):
- # Either not a method from the API or too convoluted to be sure.
- return updated
-
- # If the existing code is valid, keyword args come after positional args.
- # Therefore, all positional args must map to the first parameters.
- args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
- if any(k.keyword.value == "request" for k in kwargs):
- # We've already fixed this file, don't fix it again.
- return updated
-
- kwargs, ctrl_kwargs = partition(
- lambda a: a.keyword.value not in self.CTRL_PARAMS,
- kwargs
- )
-
- args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
- ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
- for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
-
- request_arg = cst.Arg(
- value=cst.Dict([
- cst.DictElement(
- cst.SimpleString("'{}'".format(name)),
-cst.Element(value=arg.value)
- )
- # Note: the args + kwargs looks silly, but keep in mind that
- # the control parameters had to be stripped out, and that
- # those could have been passed positionally or by keyword.
- for name, arg in zip(kword_params, args + kwargs)]),
- keyword=cst.Name("request")
- )
-
- return updated.with_changes(
- args=[request_arg] + ctrl_kwargs
- )
-
-
-def fix_files(
- in_dir: pathlib.Path,
- out_dir: pathlib.Path,
- *,
- transformer=bigtable_adminCallTransformer(),
-):
- """Duplicate the input dir to the output dir, fixing file method calls.
-
- Preconditions:
- * in_dir is a real directory
- * out_dir is a real, empty directory
- """
- pyfile_gen = (
- pathlib.Path(os.path.join(root, f))
- for root, _, files in os.walk(in_dir)
- for f in files if os.path.splitext(f)[1] == ".py"
- )
-
- for fpath in pyfile_gen:
- with open(fpath, 'r') as f:
- src = f.read()
-
- # Parse the code and insert method call fixes.
- tree = cst.parse_module(src)
- updated = tree.visit(transformer)
-
- # Create the path and directory structure for the new file.
- updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
- updated_path.parent.mkdir(parents=True, exist_ok=True)
-
- # Generate the updated source file at the corresponding path.
- with open(updated_path, 'w') as f:
- f.write(updated.code)
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(
- description="""Fix up source that uses the bigtable_admin client library.
-
-The existing sources are NOT overwritten but are copied to output_dir with changes made.
-
-Note: This tool operates at a best-effort level at converting positional
- parameters in client method calls to keyword based parameters.
- Cases where it WILL FAIL include
- A) * or ** expansion in a method call.
- B) Calls via function or method alias (includes free function calls)
- C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
-
- These all constitute false negatives. The tool will also detect false
- positives when an API method shares a name with another method.
-""")
- parser.add_argument(
- '-d',
- '--input-directory',
- required=True,
- dest='input_dir',
- help='the input directory to walk for python files to fix up',
- )
- parser.add_argument(
- '-o',
- '--output-directory',
- required=True,
- dest='output_dir',
- help='the directory to output files fixed via un-flattening',
- )
- args = parser.parse_args()
- input_dir = pathlib.Path(args.input_dir)
- output_dir = pathlib.Path(args.output_dir)
- if not input_dir.is_dir():
- print(
- f"input directory '{input_dir}' does not exist or is not a directory",
- file=sys.stderr,
- )
- sys.exit(-1)
-
- if not output_dir.is_dir():
- print(
- f"output directory '{output_dir}' does not exist or is not a directory",
- file=sys.stderr,
- )
- sys.exit(-1)
-
- if os.listdir(output_dir):
- print(
- f"output directory '{output_dir}' is not empty",
- file=sys.stderr,
- )
- sys.exit(-1)
-
- fix_files(input_dir, output_dir)
diff --git a/scripts/fixup_bigtable_v2_keywords.py b/scripts/fixup_bigtable_v2_keywords.py
deleted file mode 100644
index e65ad39a4..000000000
--- a/scripts/fixup_bigtable_v2_keywords.py
+++ /dev/null
@@ -1,186 +0,0 @@
-#! /usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import argparse
-import os
-import libcst as cst
-import pathlib
-import sys
-from typing import (Any, Callable, Dict, List, Sequence, Tuple)
-
-
-def partition(
- predicate: Callable[[Any], bool],
- iterator: Sequence[Any]
-) -> Tuple[List[Any], List[Any]]:
- """A stable, out-of-place partition."""
- results = ([], [])
-
- for i in iterator:
- results[int(predicate(i))].append(i)
-
- # Returns trueList, falseList
- return results[1], results[0]
-
-
-class bigtableCallTransformer(cst.CSTTransformer):
- CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
- METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
- 'check_and_mutate_row': ('row_key', 'table_name', 'authorized_view_name', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ),
- 'execute_query': ('instance_name', 'query', 'params', 'app_profile_id', 'prepared_query', 'proto_format', 'resume_token', ),
- 'generate_initial_change_stream_partitions': ('table_name', 'app_profile_id', ),
- 'mutate_row': ('row_key', 'mutations', 'table_name', 'authorized_view_name', 'app_profile_id', 'idempotency', ),
- 'mutate_rows': ('entries', 'table_name', 'authorized_view_name', 'app_profile_id', ),
- 'ping_and_warm': ('name', 'app_profile_id', ),
- 'prepare_query': ('instance_name', 'query', 'param_types', 'app_profile_id', 'proto_format', ),
- 'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ),
- 'read_modify_write_row': ('row_key', 'rules', 'table_name', 'authorized_view_name', 'app_profile_id', ),
- 'read_rows': ('table_name', 'authorized_view_name', 'materialized_view_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ),
- 'sample_row_keys': ('table_name', 'authorized_view_name', 'materialized_view_name', 'app_profile_id', ),
- }
-
- def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
- try:
- key = original.func.attr.value
- kword_params = self.METHOD_TO_PARAMS[key]
- except (AttributeError, KeyError):
- # Either not a method from the API or too convoluted to be sure.
- return updated
-
- # If the existing code is valid, keyword args come after positional args.
- # Therefore, all positional args must map to the first parameters.
- args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
- if any(k.keyword.value == "request" for k in kwargs):
- # We've already fixed this file, don't fix it again.
- return updated
-
- kwargs, ctrl_kwargs = partition(
- lambda a: a.keyword.value not in self.CTRL_PARAMS,
- kwargs
- )
-
- args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
- ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
- for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
-
- request_arg = cst.Arg(
- value=cst.Dict([
- cst.DictElement(
- cst.SimpleString("'{}'".format(name)),
-cst.Element(value=arg.value)
- )
- # Note: the args + kwargs looks silly, but keep in mind that
- # the control parameters had to be stripped out, and that
- # those could have been passed positionally or by keyword.
- for name, arg in zip(kword_params, args + kwargs)]),
- keyword=cst.Name("request")
- )
-
- return updated.with_changes(
- args=[request_arg] + ctrl_kwargs
- )
-
-
-def fix_files(
- in_dir: pathlib.Path,
- out_dir: pathlib.Path,
- *,
- transformer=bigtableCallTransformer(),
-):
- """Duplicate the input dir to the output dir, fixing file method calls.
-
- Preconditions:
- * in_dir is a real directory
- * out_dir is a real, empty directory
- """
- pyfile_gen = (
- pathlib.Path(os.path.join(root, f))
- for root, _, files in os.walk(in_dir)
- for f in files if os.path.splitext(f)[1] == ".py"
- )
-
- for fpath in pyfile_gen:
- with open(fpath, 'r') as f:
- src = f.read()
-
- # Parse the code and insert method call fixes.
- tree = cst.parse_module(src)
- updated = tree.visit(transformer)
-
- # Create the path and directory structure for the new file.
- updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
- updated_path.parent.mkdir(parents=True, exist_ok=True)
-
- # Generate the updated source file at the corresponding path.
- with open(updated_path, 'w') as f:
- f.write(updated.code)
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(
- description="""Fix up source that uses the bigtable client library.
-
-The existing sources are NOT overwritten but are copied to output_dir with changes made.
-
-Note: This tool operates at a best-effort level at converting positional
- parameters in client method calls to keyword based parameters.
- Cases where it WILL FAIL include
- A) * or ** expansion in a method call.
- B) Calls via function or method alias (includes free function calls)
- C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
-
- These all constitute false negatives. The tool will also detect false
- positives when an API method shares a name with another method.
-""")
- parser.add_argument(
- '-d',
- '--input-directory',
- required=True,
- dest='input_dir',
- help='the input directory to walk for python files to fix up',
- )
- parser.add_argument(
- '-o',
- '--output-directory',
- required=True,
- dest='output_dir',
- help='the directory to output files fixed via un-flattening',
- )
- args = parser.parse_args()
- input_dir = pathlib.Path(args.input_dir)
- output_dir = pathlib.Path(args.output_dir)
- if not input_dir.is_dir():
- print(
- f"input directory '{input_dir}' does not exist or is not a directory",
- file=sys.stderr,
- )
- sys.exit(-1)
-
- if not output_dir.is_dir():
- print(
- f"output directory '{output_dir}' does not exist or is not a directory",
- file=sys.stderr,
- )
- sys.exit(-1)
-
- if os.listdir(output_dir):
- print(
- f"output directory '{output_dir}' is not empty",
- file=sys.stderr,
- )
- sys.exit(-1)
-
- fix_files(input_dir, output_dir)
diff --git a/setup.py b/setup.py
index cac533db6..c8f13c372 100644
--- a/setup.py
+++ b/setup.py
@@ -12,6 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# DO NOT EDIT THIS FILE OUTSIDE OF `.librarian/generator-input`
+# The source of truth for this file is `.librarian/generator-input`
+
+
import io
import os
@@ -94,10 +98,6 @@
packages=packages,
install_requires=dependencies,
extras_require=extras,
- scripts=[
- "scripts/fixup_bigtable_v2_keywords.py",
- "scripts/fixup_admin_v2_keywords.py",
- ],
python_requires=">=3.7",
include_package_data=True,
zip_safe=False,
diff --git a/tests/system/data/test_system_async.py b/tests/system/data/test_system_async.py
index c96570b76..ac8a358a3 100644
--- a/tests/system/data/test_system_async.py
+++ b/tests/system/data/test_system_async.py
@@ -266,44 +266,49 @@ async def test_ping_and_warm(self, client, target):
@CrossSync.pytest
async def test_channel_refresh(self, table_id, instance_id, temp_rows):
"""
- change grpc channel to refresh after 1 second. Schedule a read_rows call after refresh,
- to ensure new channel works
+ perform requests while swapping out the grpc channel. Requests should continue without error
"""
- await temp_rows.add_row(b"row_key_1")
- await temp_rows.add_row(b"row_key_2")
- client = self._make_client()
- # start custom refresh task
- try:
+ import time
+
+ await temp_rows.add_row(b"test_row")
+ async with self._make_client() as client:
+ client._channel_refresh_task.cancel()
+ channel_wrapper = client.transport.grpc_channel
+ first_channel = channel_wrapper._channel
+ # swap channels frequently, with large grace windows
client._channel_refresh_task = CrossSync.create_task(
client._manage_channel,
- refresh_interval_min=1,
- refresh_interval_max=1,
+ refresh_interval_min=0.1,
+ refresh_interval_max=0.1,
+ grace_period=1,
sync_executor=client._executor,
)
- # let task run
- await CrossSync.yield_to_event_loop()
+
+ # hit channels with frequent requests
+ end_time = time.monotonic() + 3
async with client.get_table(instance_id, table_id) as table:
- rows = await table.read_rows({})
- channel_wrapper = client.transport.grpc_channel
- first_channel = client.transport.grpc_channel._channel
- assert len(rows) == 2
- await CrossSync.sleep(2)
- rows_after_refresh = await table.read_rows({})
- assert len(rows_after_refresh) == 2
- assert client.transport.grpc_channel is channel_wrapper
- assert client.transport.grpc_channel._channel is not first_channel
- # ensure gapic's logging interceptor is still active
- if CrossSync.is_async:
- interceptors = (
- client.transport.grpc_channel._channel._unary_unary_interceptors
- )
- assert GapicInterceptor in [type(i) for i in interceptors]
- else:
- assert isinstance(
- client.transport._logged_channel._interceptor, GapicInterceptor
- )
- finally:
- await client.close()
+ while time.monotonic() < end_time:
+ # we expect a CancelledError if a channel is closed before completion
+ rows = await table.read_rows({})
+ assert len(rows) == 1
+ await CrossSync.yield_to_event_loop()
+ # ensure channel was updated
+ updated_channel = channel_wrapper._channel
+ assert updated_channel is not first_channel
+ # ensure interceptors are kept (gapic's logging interceptor, and metric interceptor)
+ if CrossSync.is_async:
+ unary_interceptors = updated_channel._unary_unary_interceptors
+ assert len(unary_interceptors) == 2
+ assert GapicInterceptor in [type(i) for i in unary_interceptors]
+ assert client._metrics_interceptor in unary_interceptors
+ stream_interceptors = updated_channel._unary_stream_interceptors
+ assert len(stream_interceptors) == 1
+ assert client._metrics_interceptor in stream_interceptors
+ else:
+ assert isinstance(
+ client.transport._logged_channel._interceptor, GapicInterceptor
+ )
+ assert updated_channel._interceptor == client._metrics_interceptor
@CrossSync.pytest
@pytest.mark.usefixtures("target")
diff --git a/tests/system/data/test_system_autogen.py b/tests/system/data/test_system_autogen.py
index 44895808a..463235087 100644
--- a/tests/system/data/test_system_autogen.py
+++ b/tests/system/data/test_system_autogen.py
@@ -221,34 +221,33 @@ def test_ping_and_warm(self, client, target):
reason="emulator mode doesn't refresh channel",
)
def test_channel_refresh(self, table_id, instance_id, temp_rows):
- """change grpc channel to refresh after 1 second. Schedule a read_rows call after refresh,
- to ensure new channel works"""
- temp_rows.add_row(b"row_key_1")
- temp_rows.add_row(b"row_key_2")
- client = self._make_client()
- try:
+ """perform requests while swapping out the grpc channel. Requests should continue without error"""
+ import time
+
+ temp_rows.add_row(b"test_row")
+ with self._make_client() as client:
+ client._channel_refresh_task.cancel()
+ channel_wrapper = client.transport.grpc_channel
+ first_channel = channel_wrapper._channel
client._channel_refresh_task = CrossSync._Sync_Impl.create_task(
client._manage_channel,
- refresh_interval_min=1,
- refresh_interval_max=1,
+ refresh_interval_min=0.1,
+ refresh_interval_max=0.1,
+ grace_period=1,
sync_executor=client._executor,
)
- CrossSync._Sync_Impl.yield_to_event_loop()
+ end_time = time.monotonic() + 3
with client.get_table(instance_id, table_id) as table:
- rows = table.read_rows({})
- channel_wrapper = client.transport.grpc_channel
- first_channel = client.transport.grpc_channel._channel
- assert len(rows) == 2
- CrossSync._Sync_Impl.sleep(2)
- rows_after_refresh = table.read_rows({})
- assert len(rows_after_refresh) == 2
- assert client.transport.grpc_channel is channel_wrapper
- assert client.transport.grpc_channel._channel is not first_channel
- assert isinstance(
- client.transport._logged_channel._interceptor, GapicInterceptor
- )
- finally:
- client.close()
+ while time.monotonic() < end_time:
+ rows = table.read_rows({})
+ assert len(rows) == 1
+ CrossSync._Sync_Impl.yield_to_event_loop()
+ updated_channel = channel_wrapper._channel
+ assert updated_channel is not first_channel
+ assert isinstance(
+ client.transport._logged_channel._interceptor, GapicInterceptor
+ )
+ assert updated_channel._interceptor == client._metrics_interceptor
@pytest.mark.usefixtures("target")
@CrossSync._Sync_Impl.Retry(
diff --git a/tests/unit/data/_async/test_client.py b/tests/unit/data/_async/test_client.py
index a5ec1d02d..72b3ae738 100644
--- a/tests/unit/data/_async/test_client.py
+++ b/tests/unit/data/_async/test_client.py
@@ -1334,6 +1334,7 @@ def test_table_ctor_sync(self):
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
core_exceptions.Aborted,
+ core_exceptions.Cancelled,
],
),
(
@@ -1832,7 +1833,6 @@ async def test_read_rows_retryable_error(self, exc_type):
@pytest.mark.parametrize(
"exc_type",
[
- core_exceptions.Cancelled,
core_exceptions.PreconditionFailed,
core_exceptions.NotFound,
core_exceptions.PermissionDenied,
diff --git a/tests/unit/data/_async/test_metrics_interceptor.py b/tests/unit/data/_async/test_metrics_interceptor.py
new file mode 100644
index 000000000..6ea958358
--- /dev/null
+++ b/tests/unit/data/_async/test_metrics_interceptor.py
@@ -0,0 +1,168 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+from grpc import RpcError
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+except ImportError: # pragma: NO COVER
+ import mock # type: ignore
+
+if CrossSync.is_async:
+ from google.cloud.bigtable.data._async.metrics_interceptor import (
+ AsyncBigtableMetricsInterceptor,
+ )
+else:
+ from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( # noqa: F401
+ BigtableMetricsInterceptor,
+ )
+
+
+__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_metrics_interceptor"
+
+
+@CrossSync.convert(replace_symbols={"__aiter__": "__iter__"})
+def _make_mock_stream_call(values, exc=None):
+ """
+ Create a mock call object that can be used for streaming calls
+ """
+ call = CrossSync.Mock()
+
+ async def gen():
+ for val in values:
+ yield val
+ if exc:
+ raise exc
+
+ call.__aiter__ = mock.Mock(return_value=gen())
+ return call
+
+
+@CrossSync.convert_class(sync_name="TestMetricsInterceptor")
+class TestMetricsInterceptorAsync:
+ @staticmethod
+ @CrossSync.convert(
+ replace_symbols={
+ "AsyncBigtableMetricsInterceptor": "BigtableMetricsInterceptor"
+ }
+ )
+ def _get_target_class():
+ return AsyncBigtableMetricsInterceptor
+
+ def _make_one(self, *args, **kwargs):
+ return self._get_target_class()(*args, **kwargs)
+
+ @CrossSync.pytest
+ async def test_unary_unary_interceptor_success(self):
+ """Test that interceptor handles successful unary-unary calls"""
+ instance = self._make_one()
+ continuation = CrossSync.Mock()
+ call = continuation.return_value
+ details = mock.Mock()
+ request = mock.Mock()
+ result = await instance.intercept_unary_unary(continuation, details, request)
+ assert result == call
+ continuation.assert_called_once_with(details, request)
+
+ @CrossSync.pytest
+ async def test_unary_unary_interceptor_failure(self):
+ """Test a failed RpcError with metadata"""
+
+ instance = self._make_one()
+ exc = RpcError("test")
+ continuation = CrossSync.Mock(side_effect=exc)
+ details = mock.Mock()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ await instance.intercept_unary_unary(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+
+ @CrossSync.pytest
+ async def test_unary_unary_interceptor_failure_generic(self):
+ """Test generic exception"""
+
+ instance = self._make_one()
+ exc = ValueError("test")
+ continuation = CrossSync.Mock(side_effect=exc)
+ details = mock.Mock()
+ request = mock.Mock()
+ with pytest.raises(ValueError) as e:
+ await instance.intercept_unary_unary(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+
+ @CrossSync.pytest
+ async def test_unary_stream_interceptor_success(self):
+ """Test that interceptor handles successful unary-stream calls"""
+
+ instance = self._make_one()
+
+ continuation = CrossSync.Mock(return_value=_make_mock_stream_call([1, 2]))
+ details = mock.Mock()
+ request = mock.Mock()
+ wrapper = await instance.intercept_unary_stream(continuation, details, request)
+ results = [val async for val in wrapper]
+ assert results == [1, 2]
+ continuation.assert_called_once_with(details, request)
+
+ @CrossSync.pytest
+ async def test_unary_stream_interceptor_failure_mid_stream(self):
+ """Test that interceptor handles failures mid-stream"""
+ instance = self._make_one()
+ exc = ValueError("test")
+ continuation = CrossSync.Mock(return_value=_make_mock_stream_call([1], exc=exc))
+ details = mock.Mock()
+ request = mock.Mock()
+ wrapper = await instance.intercept_unary_stream(continuation, details, request)
+ with pytest.raises(ValueError) as e:
+ [val async for val in wrapper]
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+
+ @CrossSync.pytest
+ async def test_unary_stream_interceptor_failure_start_stream(self):
+ """Test that interceptor handles failures at start of stream with RpcError with metadata"""
+
+ instance = self._make_one()
+ exc = RpcError("test")
+
+ continuation = CrossSync.Mock()
+ continuation.side_effect = exc
+ details = mock.Mock()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ await instance.intercept_unary_stream(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+
+ @CrossSync.pytest
+ async def test_unary_stream_interceptor_failure_start_stream_generic(self):
+ """Test that interceptor handles failures at start of stream with generic exception"""
+
+ instance = self._make_one()
+ exc = ValueError("test")
+
+ continuation = CrossSync.Mock()
+ continuation.side_effect = exc
+ details = mock.Mock()
+ request = mock.Mock()
+ with pytest.raises(ValueError) as e:
+ await instance.intercept_unary_stream(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
diff --git a/tests/unit/data/_async/test_mutations_batcher.py b/tests/unit/data/_async/test_mutations_batcher.py
index 29f2f1026..b139f31f1 100644
--- a/tests/unit/data/_async/test_mutations_batcher.py
+++ b/tests/unit/data/_async/test_mutations_batcher.py
@@ -1169,6 +1169,7 @@ def test__add_exceptions(self, limit, in_e, start_e, end_e):
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
core_exceptions.Aborted,
+ core_exceptions.Cancelled,
],
),
(
diff --git a/tests/unit/data/_sync_autogen/test_client.py b/tests/unit/data/_sync_autogen/test_client.py
index 6ad6c1063..49ed41ad6 100644
--- a/tests/unit/data/_sync_autogen/test_client.py
+++ b/tests/unit/data/_sync_autogen/test_client.py
@@ -1063,6 +1063,7 @@ def test_ctor_invalid_timeout_values(self):
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
core_exceptions.Aborted,
+ core_exceptions.Cancelled,
],
),
(
@@ -1507,7 +1508,6 @@ def test_read_rows_retryable_error(self, exc_type):
@pytest.mark.parametrize(
"exc_type",
[
- core_exceptions.Cancelled,
core_exceptions.PreconditionFailed,
core_exceptions.NotFound,
core_exceptions.PermissionDenied,
diff --git a/tests/unit/data/_sync_autogen/test_metrics_interceptor.py b/tests/unit/data/_sync_autogen/test_metrics_interceptor.py
new file mode 100644
index 000000000..56a6f3650
--- /dev/null
+++ b/tests/unit/data/_sync_autogen/test_metrics_interceptor.py
@@ -0,0 +1,140 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+import pytest
+from grpc import RpcError
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import (
+ BigtableMetricsInterceptor,
+)
+
+
+def _make_mock_stream_call(values, exc=None):
+ """Create a mock call object that can be used for streaming calls"""
+ call = CrossSync._Sync_Impl.Mock()
+
+ def gen():
+ for val in values:
+ yield val
+ if exc:
+ raise exc
+
+ call.__iter__ = mock.Mock(return_value=gen())
+ return call
+
+
+class TestMetricsInterceptor:
+ @staticmethod
+ def _get_target_class():
+ return BigtableMetricsInterceptor
+
+ def _make_one(self, *args, **kwargs):
+ return self._get_target_class()(*args, **kwargs)
+
+ def test_unary_unary_interceptor_success(self):
+ """Test that interceptor handles successful unary-unary calls"""
+ instance = self._make_one()
+ continuation = CrossSync._Sync_Impl.Mock()
+ call = continuation.return_value
+ details = mock.Mock()
+ request = mock.Mock()
+ result = instance.intercept_unary_unary(continuation, details, request)
+ assert result == call
+ continuation.assert_called_once_with(details, request)
+
+ def test_unary_unary_interceptor_failure(self):
+ """Test a failed RpcError with metadata"""
+ instance = self._make_one()
+ exc = RpcError("test")
+ continuation = CrossSync._Sync_Impl.Mock(side_effect=exc)
+ details = mock.Mock()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ instance.intercept_unary_unary(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+
+ def test_unary_unary_interceptor_failure_generic(self):
+ """Test generic exception"""
+ instance = self._make_one()
+ exc = ValueError("test")
+ continuation = CrossSync._Sync_Impl.Mock(side_effect=exc)
+ details = mock.Mock()
+ request = mock.Mock()
+ with pytest.raises(ValueError) as e:
+ instance.intercept_unary_unary(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+
+ def test_unary_stream_interceptor_success(self):
+ """Test that interceptor handles successful unary-stream calls"""
+ instance = self._make_one()
+ continuation = CrossSync._Sync_Impl.Mock(
+ return_value=_make_mock_stream_call([1, 2])
+ )
+ details = mock.Mock()
+ request = mock.Mock()
+ wrapper = instance.intercept_unary_stream(continuation, details, request)
+ results = [val for val in wrapper]
+ assert results == [1, 2]
+ continuation.assert_called_once_with(details, request)
+
+ def test_unary_stream_interceptor_failure_mid_stream(self):
+ """Test that interceptor handles failures mid-stream"""
+ instance = self._make_one()
+ exc = ValueError("test")
+ continuation = CrossSync._Sync_Impl.Mock(
+ return_value=_make_mock_stream_call([1], exc=exc)
+ )
+ details = mock.Mock()
+ request = mock.Mock()
+ wrapper = instance.intercept_unary_stream(continuation, details, request)
+ with pytest.raises(ValueError) as e:
+ [val for val in wrapper]
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+
+ def test_unary_stream_interceptor_failure_start_stream(self):
+ """Test that interceptor handles failures at start of stream with RpcError with metadata"""
+ instance = self._make_one()
+ exc = RpcError("test")
+ continuation = CrossSync._Sync_Impl.Mock()
+ continuation.side_effect = exc
+ details = mock.Mock()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ instance.intercept_unary_stream(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+
+ def test_unary_stream_interceptor_failure_start_stream_generic(self):
+ """Test that interceptor handles failures at start of stream with generic exception"""
+ instance = self._make_one()
+ exc = ValueError("test")
+ continuation = CrossSync._Sync_Impl.Mock()
+ continuation.side_effect = exc
+ details = mock.Mock()
+ request = mock.Mock()
+ with pytest.raises(ValueError) as e:
+ instance.intercept_unary_stream(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
diff --git a/tests/unit/data/_sync_autogen/test_mutations_batcher.py b/tests/unit/data/_sync_autogen/test_mutations_batcher.py
index 72db64146..92d16b349 100644
--- a/tests/unit/data/_sync_autogen/test_mutations_batcher.py
+++ b/tests/unit/data/_sync_autogen/test_mutations_batcher.py
@@ -1021,6 +1021,7 @@ def test__add_exceptions(self, limit, in_e, start_e, end_e):
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
core_exceptions.Aborted,
+ core_exceptions.Cancelled,
],
),
(
diff --git a/tests/unit/data/test_sync_up_to_date.py b/tests/unit/data/test_sync_up_to_date.py
index d4623a6c8..e6bce9cf6 100644
--- a/tests/unit/data/test_sync_up_to_date.py
+++ b/tests/unit/data/test_sync_up_to_date.py
@@ -90,7 +90,7 @@ def test_verify_headers(sync_file):
\#\ distributed\ under\ the\ License\ is\ distributed\ on\ an\ \"AS\ IS\"\ BASIS,\n
\#\ WITHOUT\ WARRANTIES\ OR\ CONDITIONS\ OF\ ANY\ KIND,\ either\ express\ or\ implied\.\n
\#\ See\ the\ License\ for\ the\ specific\ language\ governing\ permissions\ and\n
- \#\ limitations\ under\ the\ License\.
+ \#\ limitations\ under\ the\ License
"""
pattern = re.compile(license_regex, re.VERBOSE)
diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
index 166f27eb8..b0ba35f0c 100644
--- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
+++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
@@ -182,12 +182,19 @@ def test__read_environment_variables():
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
- with pytest.raises(ValueError) as excinfo:
- BigtableInstanceAdminClient._read_environment_variables()
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with pytest.raises(ValueError) as excinfo:
+ BigtableInstanceAdminClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ else:
+ assert BigtableInstanceAdminClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
assert BigtableInstanceAdminClient._read_environment_variables() == (
@@ -226,6 +233,105 @@ def test__read_environment_variables():
)
+def test_use_client_cert_effective():
+ # Test case 1: Test when `should_use_client_cert` returns True.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=True
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+ # Test case 2: Test when `should_use_client_cert` returns False.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should NOT be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=False
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 3: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "true".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+ # Test case 4: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 5: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "True".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "True"}):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+ # Test case 6: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "False".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "False"}
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 7: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "TRUE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "TRUE"}):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+ # Test case 8: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "FALSE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "FALSE"}
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 9: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not set.
+ # In this case, the method should return False, which is the default value.
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, clear=True):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 10: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should raise a ValueError as the environment variable must be either
+ # "true" or "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ with pytest.raises(ValueError):
+ BigtableInstanceAdminClient._use_client_cert_effective()
+
+ # Test case 11: Test when `should_use_client_cert` is available and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should return False as the environment variable is set to an invalid value.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 12: Test when `should_use_client_cert` is available and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is unset. Also,
+ # the GOOGLE_API_CONFIG environment variable is unset.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": ""}):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": ""}):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+
def test__get_client_cert_source():
mock_provided_cert_source = mock.Mock()
mock_default_cert_source = mock.Mock()
@@ -615,17 +721,6 @@ def test_bigtable_instance_admin_client_client_options(
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client = client_class(transport=transport_name)
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
@@ -861,6 +956,119 @@ def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client
assert api_endpoint == mock_api_endpoint
assert cert_source is None
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "Unsupported".
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset.
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", None)
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
+ # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset(empty).
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", "")
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
@@ -911,18 +1119,6 @@ def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client_class.get_mtls_endpoint_and_cert_source()
-
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
@pytest.mark.parametrize(
"client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient]
@@ -25948,6 +26144,7 @@ def test_bigtable_instance_admin_grpc_asyncio_transport_channel():
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize(
"transport_class",
[
diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
index 7cbe6f3b1..cb5e8dd52 100644
--- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
+++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
@@ -185,12 +185,19 @@ def test__read_environment_variables():
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
- with pytest.raises(ValueError) as excinfo:
- BaseBigtableTableAdminClient._read_environment_variables()
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with pytest.raises(ValueError) as excinfo:
+ BaseBigtableTableAdminClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ else:
+ assert BaseBigtableTableAdminClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
assert BaseBigtableTableAdminClient._read_environment_variables() == (
@@ -229,6 +236,107 @@ def test__read_environment_variables():
)
+def test_use_client_cert_effective():
+ # Test case 1: Test when `should_use_client_cert` returns True.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=True
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is True
+
+ # Test case 2: Test when `should_use_client_cert` returns False.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should NOT be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=False
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 3: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "true".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is True
+
+ # Test case 4: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 5: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "True".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "True"}):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is True
+
+ # Test case 6: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "False".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "False"}
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 7: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "TRUE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "TRUE"}):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is True
+
+ # Test case 8: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "FALSE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "FALSE"}
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 9: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not set.
+ # In this case, the method should return False, which is the default value.
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, clear=True):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 10: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should raise a ValueError as the environment variable must be either
+ # "true" or "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ with pytest.raises(ValueError):
+ BaseBigtableTableAdminClient._use_client_cert_effective()
+
+ # Test case 11: Test when `should_use_client_cert` is available and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should return False as the environment variable is set to an invalid value.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 12: Test when `should_use_client_cert` is available and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is unset. Also,
+ # the GOOGLE_API_CERTIFICATE_CONFIG environment variable is unset.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": ""}):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": ""}):
+ assert (
+ BaseBigtableTableAdminClient._use_client_cert_effective() is False
+ )
+
+
def test__get_client_cert_source():
mock_provided_cert_source = mock.Mock()
mock_default_cert_source = mock.Mock()
@@ -618,17 +726,6 @@ def test_base_bigtable_table_admin_client_client_options(
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client = client_class(transport=transport_name)
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
@@ -866,6 +963,119 @@ def test_base_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(
assert api_endpoint == mock_api_endpoint
assert cert_source is None
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "Unsupported".
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset.
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", None)
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
+ # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset(empty).
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", "")
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
@@ -916,18 +1126,6 @@ def test_base_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client_class.get_mtls_endpoint_and_cert_source()
-
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
@pytest.mark.parametrize(
"client_class", [BaseBigtableTableAdminClient, BaseBigtableTableAdminAsyncClient]
@@ -29050,6 +29248,7 @@ def test_bigtable_table_admin_grpc_asyncio_transport_channel():
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize(
"transport_class",
[
diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py
index cb78d2b7a..ea7f0955d 100644
--- a/tests/unit/gapic/bigtable_v2/test_bigtable.py
+++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py
@@ -151,12 +151,19 @@ def test__read_environment_variables():
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
- with pytest.raises(ValueError) as excinfo:
- BigtableClient._read_environment_variables()
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with pytest.raises(ValueError) as excinfo:
+ BigtableClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ else:
+ assert BigtableClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
assert BigtableClient._read_environment_variables() == (False, "never", None)
@@ -183,6 +190,105 @@ def test__read_environment_variables():
)
+def test_use_client_cert_effective():
+ # Test case 1: Test when `should_use_client_cert` returns True.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=True
+ ):
+ assert BigtableClient._use_client_cert_effective() is True
+
+ # Test case 2: Test when `should_use_client_cert` returns False.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should NOT be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=False
+ ):
+ assert BigtableClient._use_client_cert_effective() is False
+
+ # Test case 3: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "true".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert BigtableClient._use_client_cert_effective() is True
+
+ # Test case 4: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}
+ ):
+ assert BigtableClient._use_client_cert_effective() is False
+
+ # Test case 5: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "True".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "True"}):
+ assert BigtableClient._use_client_cert_effective() is True
+
+ # Test case 6: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "False".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "False"}
+ ):
+ assert BigtableClient._use_client_cert_effective() is False
+
+ # Test case 7: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "TRUE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "TRUE"}):
+ assert BigtableClient._use_client_cert_effective() is True
+
+ # Test case 8: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "FALSE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "FALSE"}
+ ):
+ assert BigtableClient._use_client_cert_effective() is False
+
+ # Test case 9: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not set.
+ # In this case, the method should return False, which is the default value.
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, clear=True):
+ assert BigtableClient._use_client_cert_effective() is False
+
+ # Test case 10: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should raise a ValueError as the environment variable must be either
+ # "true" or "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ with pytest.raises(ValueError):
+ BigtableClient._use_client_cert_effective()
+
+ # Test case 11: Test when `should_use_client_cert` is available and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should return False as the environment variable is set to an invalid value.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ assert BigtableClient._use_client_cert_effective() is False
+
+ # Test case 12: Test when `should_use_client_cert` is available and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is unset. Also,
+ # the GOOGLE_API_CERTIFICATE_CONFIG environment variable is unset.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": ""}):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": ""}):
+ assert BigtableClient._use_client_cert_effective() is False
+
+
def test__get_client_cert_source():
mock_provided_cert_source = mock.Mock()
mock_default_cert_source = mock.Mock()
@@ -539,17 +645,6 @@ def test_bigtable_client_client_options(client_class, transport_class, transport
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client = client_class(transport=transport_name)
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
@@ -761,6 +856,119 @@ def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class):
assert api_endpoint == mock_api_endpoint
assert cert_source is None
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "Unsupported".
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset.
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", None)
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
+ # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset(empty).
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", "")
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
@@ -811,18 +1019,6 @@ def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class):
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client_class.get_mtls_endpoint_and_cert_source()
-
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient])
@mock.patch.object(
@@ -6852,8 +7048,8 @@ def test_read_rows_routing_parameters_request_1_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -6880,7 +7076,6 @@ def test_read_rows_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -6916,8 +7111,43 @@ def test_read_rows_routing_parameters_request_3_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+def test_read_rows_routing_parameters_request_4_grpc():
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call:
+ call.return_value = iter([bigtable.ReadRowsResponse()])
+ client.read_rows(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
+ }
+ )
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.ReadRowsRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+ assert args[0] == request_msg
+
+ expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -6949,8 +7179,8 @@ def test_sample_row_keys_routing_parameters_request_1_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -6977,7 +7207,6 @@ def test_sample_row_keys_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7013,8 +7242,43 @@ def test_sample_row_keys_routing_parameters_request_3_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+def test_sample_row_keys_routing_parameters_request_4_grpc():
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call:
+ call.return_value = iter([bigtable.SampleRowKeysResponse()])
+ client.sample_row_keys(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
+ }
+ )
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.SampleRowKeysRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+ assert args[0] == request_msg
+
+ expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7046,8 +7310,8 @@ def test_mutate_row_routing_parameters_request_1_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7074,7 +7338,6 @@ def test_mutate_row_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7110,8 +7373,8 @@ def test_mutate_row_routing_parameters_request_3_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7143,8 +7406,8 @@ def test_mutate_rows_routing_parameters_request_1_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7171,7 +7434,6 @@ def test_mutate_rows_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7207,8 +7469,8 @@ def test_mutate_rows_routing_parameters_request_3_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7242,8 +7504,8 @@ def test_check_and_mutate_row_routing_parameters_request_1_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7272,7 +7534,6 @@ def test_check_and_mutate_row_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7310,8 +7571,8 @@ def test_check_and_mutate_row_routing_parameters_request_3_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7341,8 +7602,8 @@ def test_ping_and_warm_routing_parameters_request_1_grpc():
expected_headers = {
"name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7369,7 +7630,6 @@ def test_ping_and_warm_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7403,8 +7663,8 @@ def test_read_modify_write_row_routing_parameters_request_1_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7435,7 +7695,6 @@ def test_read_modify_write_row_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7473,8 +7732,8 @@ def test_read_modify_write_row_routing_parameters_request_3_grpc():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7506,8 +7765,8 @@ def test_prepare_query_routing_parameters_request_1_grpc():
expected_headers = {
"name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7534,7 +7793,6 @@ def test_prepare_query_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7566,8 +7824,8 @@ def test_execute_query_routing_parameters_request_1_grpc():
expected_headers = {
"name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7594,7 +7852,6 @@ def test_execute_query_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7938,8 +8195,8 @@ async def test_read_rows_routing_parameters_request_1_grpc_asyncio():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -7971,7 +8228,6 @@ async def test_read_rows_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8012,8 +8268,48 @@ async def test_read_rows_routing_parameters_request_3_grpc_asyncio():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+@pytest.mark.asyncio
+async def test_read_rows_routing_parameters_request_4_grpc_asyncio():
+ client = BigtableAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[bigtable.ReadRowsResponse()]
+ )
+ await client.read_rows(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
+ }
+ )
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.ReadRowsRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+
+ assert args[0] == request_msg
+
+ expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8050,8 +8346,8 @@ async def test_sample_row_keys_routing_parameters_request_1_grpc_asyncio():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8083,7 +8379,6 @@ async def test_sample_row_keys_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8124,8 +8419,48 @@ async def test_sample_row_keys_routing_parameters_request_3_grpc_asyncio():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+@pytest.mark.asyncio
+async def test_sample_row_keys_routing_parameters_request_4_grpc_asyncio():
+ client = BigtableAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[bigtable.SampleRowKeysResponse()]
+ )
+ await client.sample_row_keys(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
+ }
+ )
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.SampleRowKeysRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+
+ assert args[0] == request_msg
+ expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8161,8 +8496,8 @@ async def test_mutate_row_routing_parameters_request_1_grpc_asyncio():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8193,7 +8528,6 @@ async def test_mutate_row_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8233,8 +8567,8 @@ async def test_mutate_row_routing_parameters_request_3_grpc_asyncio():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8271,8 +8605,8 @@ async def test_mutate_rows_routing_parameters_request_1_grpc_asyncio():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8304,7 +8638,6 @@ async def test_mutate_rows_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8345,8 +8678,8 @@ async def test_mutate_rows_routing_parameters_request_3_grpc_asyncio():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8386,8 +8719,8 @@ async def test_check_and_mutate_row_routing_parameters_request_1_grpc_asyncio():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8422,7 +8755,6 @@ async def test_check_and_mutate_row_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8466,8 +8798,8 @@ async def test_check_and_mutate_row_routing_parameters_request_3_grpc_asyncio():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8503,8 +8835,8 @@ async def test_ping_and_warm_routing_parameters_request_1_grpc_asyncio():
expected_headers = {
"name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8535,7 +8867,6 @@ async def test_ping_and_warm_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8573,8 +8904,8 @@ async def test_read_modify_write_row_routing_parameters_request_1_grpc_asyncio()
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8609,7 +8940,6 @@ async def test_read_modify_write_row_routing_parameters_request_2_grpc_asyncio()
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8651,8 +8981,8 @@ async def test_read_modify_write_row_routing_parameters_request_3_grpc_asyncio()
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8690,8 +9020,8 @@ async def test_prepare_query_routing_parameters_request_1_grpc_asyncio():
expected_headers = {
"name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8724,7 +9054,6 @@ async def test_prepare_query_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8761,8 +9090,8 @@ async def test_execute_query_routing_parameters_request_1_grpc_asyncio():
expected_headers = {
"name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -8794,7 +9123,6 @@ async def test_execute_query_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10448,8 +10776,8 @@ def test_read_rows_routing_parameters_request_1_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10475,7 +10803,6 @@ def test_read_rows_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10510,8 +10837,42 @@ def test_read_rows_routing_parameters_request_3_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+def test_read_rows_routing_parameters_request_4_rest():
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call:
+ client.read_rows(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
+ }
+ )
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.ReadRowsRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+
+ assert args[0] == request_msg
+
+ expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10542,8 +10903,8 @@ def test_sample_row_keys_routing_parameters_request_1_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10569,7 +10930,6 @@ def test_sample_row_keys_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10604,8 +10964,42 @@ def test_sample_row_keys_routing_parameters_request_3_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+def test_sample_row_keys_routing_parameters_request_4_rest():
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call:
+ client.sample_row_keys(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
+ }
+ )
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.SampleRowKeysRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+
+ assert args[0] == request_msg
+
+ expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10636,8 +11030,8 @@ def test_mutate_row_routing_parameters_request_1_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10663,7 +11057,6 @@ def test_mutate_row_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10698,8 +11091,8 @@ def test_mutate_row_routing_parameters_request_3_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10730,8 +11123,8 @@ def test_mutate_rows_routing_parameters_request_1_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10757,7 +11150,6 @@ def test_mutate_rows_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10792,8 +11184,8 @@ def test_mutate_rows_routing_parameters_request_3_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10826,8 +11218,8 @@ def test_check_and_mutate_row_routing_parameters_request_1_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10855,7 +11247,6 @@ def test_check_and_mutate_row_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10892,8 +11283,8 @@ def test_check_and_mutate_row_routing_parameters_request_3_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10922,8 +11313,8 @@ def test_ping_and_warm_routing_parameters_request_1_rest():
expected_headers = {
"name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10949,7 +11340,6 @@ def test_ping_and_warm_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -10982,8 +11372,8 @@ def test_read_modify_write_row_routing_parameters_request_1_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -11013,7 +11403,6 @@ def test_read_modify_write_row_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -11050,8 +11439,8 @@ def test_read_modify_write_row_routing_parameters_request_3_rest():
expected_headers = {
"table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -11082,8 +11471,8 @@ def test_prepare_query_routing_parameters_request_1_rest():
expected_headers = {
"name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -11109,7 +11498,6 @@ def test_prepare_query_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -11140,8 +11528,8 @@ def test_execute_query_routing_parameters_request_1_rest():
expected_headers = {
"name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -11167,7 +11555,6 @@ def test_execute_query_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
-
# assert the expected headers are present, in any order
routing_string = next(
iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
@@ -11565,6 +11952,7 @@ def test_bigtable_grpc_asyncio_transport_channel():
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize(
"transport_class",
[transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport],