diff --git a/.cross_sync/transformers.py b/.cross_sync/transformers.py
index 42ba3f83c..9adadd0aa 100644
--- a/.cross_sync/transformers.py
+++ b/.cross_sync/transformers.py
@@ -71,18 +71,19 @@ def visit_FunctionDef(self, node):
Replace function docstrings
"""
docstring = ast.get_docstring(node)
- if docstring and isinstance(node.body[0], ast.Expr) and isinstance(
- node.body[0].value, ast.Str
- ):
+ if docstring and isinstance(node.body[0], ast.Expr) \
+ and isinstance(node.body[0].value, ast.Constant) \
+ and isinstance(node.body[0].value.value, str) \
+ :
for key_word, replacement in self.replacements.items():
docstring = docstring.replace(key_word, replacement)
- node.body[0].value.s = docstring
+ node.body[0].value.value = docstring
return self.generic_visit(node)
def visit_Constant(self, node):
"""Replace string type annotations"""
try:
- node.s = self.replacements.get(node.s, node.s)
+ node.value = self.replacements.get(node.value, node.value)
except TypeError:
# ignore unhashable types (e.g. list)
pass
@@ -264,7 +265,7 @@ def get_output_path(self, node):
for target in n.targets:
if isinstance(target, ast.Name) and target.id == self.FILE_ANNOTATION:
# return the output path
- return n.value.s.replace(".", "/") + ".py"
+ return n.value.value.replace(".", "/") + ".py"
def visit_Module(self, node):
# look for __CROSS_SYNC_OUTPUT__ Assign statement
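Note on the hunk above: ast.Str and its .s attribute were deprecated in Python 3.8 and removed in 3.12, which is why the transformer now checks for an ast.Constant holding a str and writes to .value. A minimal standalone sketch of the same docstring-replacement pattern (not the project's actual CrossSync transformer; requires Python 3.9+ for ast.unparse):

# Hedged sketch: demonstrates the ast.Str -> ast.Constant migration shown above.
import ast

class DocstringReplacer(ast.NodeTransformer):
    def __init__(self, replacements):
        self.replacements = replacements

    def visit_FunctionDef(self, node):
        docstring = ast.get_docstring(node)
        if (
            docstring
            and isinstance(node.body[0], ast.Expr)
            and isinstance(node.body[0].value, ast.Constant)
            and isinstance(node.body[0].value.value, str)
        ):
            for key_word, replacement in self.replacements.items():
                docstring = docstring.replace(key_word, replacement)
            node.body[0].value.value = docstring
        return self.generic_visit(node)

source = 'def f():\n    """Async helper."""\n'
tree = DocstringReplacer({"Async": "Sync"}).visit(ast.parse(source))
print(ast.unparse(tree))  # docstring becomes "Sync helper."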
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 8e8f088b7..4012444e4 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -9,4 +9,4 @@
* @googleapis/yoshi-python @googleapis/api-bigtable @googleapis/api-bigtable-partners
# @googleapis/python-samples-reviewers @googleapis/api-bigtable @googleapis/api-bigtable-partners are the default owners for samples changes
-/samples/ @googleapis/python-samples-reviewers @googleapis/api-bigtable @googleapis/api-bigtable-partners
+/samples/ @googleapis/python-samples-reviewers @googleapis/api-bigtable @googleapis/api-bigtable-partners @googleapis/cloud-sdk-python-team
diff --git a/.github/auto-approve.yml b/.github/auto-approve.yml
deleted file mode 100644
index 311ebbb85..000000000
--- a/.github/auto-approve.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-# https://github.com/googleapis/repo-automation-bots/tree/main/packages/auto-approve
-processes:
- - "OwlBotTemplateChanges"
diff --git a/.github/release-please.yml b/.github/release-please.yml
deleted file mode 100644
index 593e83f9f..000000000
--- a/.github/release-please.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-releaseType: python
-handleGHRelease: true
-# NOTE: this section is generated by synthtool.languages.python
-# See https://github.com/googleapis/synthtool/blob/master/synthtool/languages/python.py
-manifest: true
-branches:
-- branch: v1
- handleGHRelease: true
- releaseType: python
-- branch: v0
- handleGHRelease: true
- releaseType: python
diff --git a/.github/release-trigger.yml b/.github/release-trigger.yml
deleted file mode 100644
index 0bbdd8e4c..000000000
--- a/.github/release-trigger.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-enabled: true
-multiScmName: python-bigtable
diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml
deleted file mode 100644
index df49eafcc..000000000
--- a/.github/sync-repo-settings.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-# Whether or not rebase-merging is enabled on this repository.
-# Defaults to `true`
-rebaseMergeAllowed: true
-
-# Whether or not squash-merging is enabled on this repository.
-# Defaults to `true`
-squashMergeAllowed: true
-
-# Whether or not PRs are merged with a merge commit on this repository.
-# Defaults to `false`
-mergeCommitAllowed: false
-
-# Rules for main branch protection
-branchProtectionRules:
-# Identifies the protection rule pattern. Name of the branch to be protected.
-# Defaults to `main`
-- pattern: main
- # Can admins overwrite branch protection.
- # Defaults to `true`
- isAdminEnforced: true
- # Number of approving reviews required to update matching branches.
- # Defaults to `1`
- requiredApprovingReviewCount: 1
- # Are reviews from code owners required to update matching branches.
- # Defaults to `false`
- requiresCodeOwnerReviews: true
- # Require up to date branches
- requiresStrictStatusChecks: false
- # List of required status check contexts that must pass for commits to be accepted to matching branches.
- requiredStatusCheckContexts:
- - 'Kokoro'
- - 'Kokoro system-3.8'
- - 'cla/google'
- - 'OwlBot Post Processor'
-# List of explicit permissions to add (additive only)
-permissionRules:
- # Team slug to add to repository permissions
- - team: yoshi-admins
- # Access level required, one of push|pull|admin|maintain|triage
- permission: admin
- # Team slug to add to repository permissions
- - team: yoshi-python-admins
- # Access level required, one of push|pull|admin|maintain|triage
- permission: admin
- # Team slug to add to repository permissions
- - team: yoshi-python
- # Access level required, one of push|pull|admin|maintain|triage
- permission: push
diff --git a/.github/workflows/conformance.yaml b/.github/workflows/conformance.yaml
index 8445240c3..f7396eaa9 100644
--- a/.github/workflows/conformance.yaml
+++ b/.github/workflows/conformance.yaml
@@ -24,17 +24,15 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- test-version: [ "v0.0.2" ]
- py-version: [ 3.8 ]
- client-type: [ "async", "sync", "legacy" ]
+ test-version: [ "v0.0.4" ]
+ py-version: [ 3.13 ]
+ client-type: [ "async", "sync"]
+ # None of the clients currently support reverse scans, execute query plan refresh, retry info, or routing cookie
include:
+ - client-type: "async"
+ test_args: "-skip \"PlanRefresh|_Reverse|_WithRetryInfo|_WithRoutingCookie\""
- client-type: "sync"
- # sync client does not support concurrent streams
- test_args: "-skip _Generic_MultiStream"
- - client-type: "legacy"
- # legacy client is synchronous and does not support concurrent streams
- # legacy client does not expose mutate_row. Disable those tests
- test_args: "-skip _Generic_MultiStream -skip TestMutateRow_"
+ test_args: "-skip \"PlanRefresh|_Reverse|_WithRetryInfo|_WithRoutingCookie|_Generic_MultiStream\""
fail-fast: false
name: "${{ matrix.client-type }} client / python ${{ matrix.py-version }} / test tag ${{ matrix.test-version }}"
steps:
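The test_args values above are passed to the conformance runner's -skip flag, which takes a regular expression over test names. A hedged illustration of which names such an alternation pattern filters (the test names below are hypothetical):

import re

# Same pattern as the sync client's test_args above.
skip = re.compile(r"PlanRefresh|_Reverse|_WithRetryInfo|_WithRoutingCookie|_Generic_MultiStream")
tests = [
    "TestReadRows_Generic_MultiStream",  # skipped for the sync client
    "TestReadRows_Reverse_SingleCell",   # skipped for both clients
    "TestMutateRow_Generic_Headers",     # still runs
]
for name in tests:
    print(name, "skip" if skip.search(name) else "run")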
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 4866193af..9a0598202 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -12,7 +12,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: "3.8"
+ python-version: "3.13"
- name: Install nox
run: |
python -m pip install --upgrade setuptools pip wheel
diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml
index 3915cddd3..f2b78a536 100644
--- a/.github/workflows/mypy.yml
+++ b/.github/workflows/mypy.yml
@@ -12,7 +12,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: "3.8"
+ python-version: "3.13"
- name: Install nox
run: |
python -m pip install --upgrade setuptools pip wheel
diff --git a/.github/workflows/system_emulated.yml b/.github/workflows/system_emulated.yml
index c9dab998c..d8bbbb639 100644
--- a/.github/workflows/system_emulated.yml
+++ b/.github/workflows/system_emulated.yml
@@ -17,7 +17,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: '3.8'
+ python-version: '3.13'
- name: Setup GCloud SDK
uses: google-github-actions/setup-gcloud@v2.1.1
diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
index 6a0429d96..dad646c6b 100644
--- a/.github/workflows/unittest.yml
+++ b/.github/workflows/unittest.yml
@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-22.04
strategy:
matrix:
- python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13']
+ python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13', '3.14']
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -45,7 +45,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v5
with:
- python-version: "3.8"
+ python-version: "3.13"
- name: Install coverage
run: |
python -m pip install --upgrade setuptools pip wheel
diff --git a/.kokoro/presubmit/system-3.8.cfg b/.kokoro/presubmit/system-3.9.cfg
similarity index 83%
rename from .kokoro/presubmit/system-3.8.cfg
rename to .kokoro/presubmit/system-3.9.cfg
index f4bcee3db..b8ae66b37 100644
--- a/.kokoro/presubmit/system-3.8.cfg
+++ b/.kokoro/presubmit/system-3.9.cfg
@@ -3,5 +3,5 @@
# Only run this nox session.
env_vars: {
key: "NOX_SESSION"
- value: "system-3.8"
+ value: "system-3.9"
}
\ No newline at end of file
diff --git a/.kokoro/presubmit/system.cfg b/.kokoro/presubmit/system.cfg
new file mode 100644
index 000000000..30956a3ab
--- /dev/null
+++ b/.kokoro/presubmit/system.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "system-3.10"
+}
diff --git a/.kokoro/samples/python3.14/common.cfg b/.kokoro/samples/python3.14/common.cfg
new file mode 100644
index 000000000..a9ea06119
--- /dev/null
+++ b/.kokoro/samples/python3.14/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.14"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-314"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigtable/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh"
diff --git a/.kokoro/samples/python3.14/continuous.cfg b/.kokoro/samples/python3.14/continuous.cfg
new file mode 100644
index 000000000..a1c8d9759
--- /dev/null
+++ b/.kokoro/samples/python3.14/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.14/periodic-head.cfg b/.kokoro/samples/python3.14/periodic-head.cfg
new file mode 100644
index 000000000..be25a34f9
--- /dev/null
+++ b/.kokoro/samples/python3.14/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigtable/.kokoro/test-samples-against-head.sh"
+}
diff --git a/.kokoro/samples/python3.14/periodic.cfg b/.kokoro/samples/python3.14/periodic.cfg
new file mode 100644
index 000000000..71cd1e597
--- /dev/null
+++ b/.kokoro/samples/python3.14/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
diff --git a/.kokoro/samples/python3.14/presubmit.cfg b/.kokoro/samples/python3.14/presubmit.cfg
new file mode 100644
index 000000000..a1c8d9759
--- /dev/null
+++ b/.kokoro/samples/python3.14/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh
index 35fa52923..d03f92dfc 100755
--- a/.kokoro/trampoline_v2.sh
+++ b/.kokoro/trampoline_v2.sh
@@ -26,8 +26,8 @@
# To run this script, first download few files from gcs to /dev/shm.
# (/dev/shm is passed into the container as KOKORO_GFILE_DIR).
#
-# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm
-# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm
+# gcloud storage cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm
+# gcloud storage cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm
#
# Then run the script.
# .kokoro/trampoline_v2.sh
diff --git a/.librarian/generator-input/.repo-metadata.json b/.librarian/generator-input/.repo-metadata.json
new file mode 100644
index 000000000..9de4b5f92
--- /dev/null
+++ b/.librarian/generator-input/.repo-metadata.json
@@ -0,0 +1,80 @@
+{
+ "name": "bigtable",
+ "name_pretty": "Cloud Bigtable",
+ "product_documentation": "https://cloud.google.com/bigtable",
+ "client_documentation": "https://cloud.google.com/python/docs/reference/bigtable/latest",
+ "issue_tracker": "https://issuetracker.google.com/savedsearches/559777",
+ "release_level": "stable",
+ "language": "python",
+ "library_type": "GAPIC_COMBO",
+ "repo": "googleapis/python-bigtable",
+ "distribution_name": "google-cloud-bigtable",
+ "api_id": "bigtable.googleapis.com",
+ "requires_billing": true,
+ "samples": [
+ {
+ "name": "Hello World in Cloud Bigtable",
+ "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello",
+ "file": "main.py",
+ "runnable": true,
+    "custom_content": "\nusage: main.py [-h] [--table TABLE] project_id instance_id\nDemonstrates how to connect to Cloud Bigtable and run some basic operations.\nPrerequisites: - Create a Cloud Bigtable cluster.\nhttps://cloud.google.com/bigtable/docs/creating-cluster - Set your Google\nApplication Default Credentials.\nhttps://developers.google.com/identity/protocols/application-default-\ncredentials\npositional arguments:\nproject_id Your Cloud Platform project ID.\ninstance_id ID of the Cloud Bigtable instance to connect to.\noptional arguments:\n-h, --help show this help message and exit\n--table TABLE Table to create and destroy. (default: Hello-Bigtable)\n",
+ "override_path": "hello"
+ },
+ {
+ "name": "Hello World using HappyBase",
+ "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase",
+ "file": "main.py",
+ "runnable": true,
+    "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id\nDemonstrates how to connect to Cloud Bigtable and run some basic operations.\nPrerequisites: - Create a Cloud Bigtable cluster.\nhttps://cloud.google.com/bigtable/docs/creating-cluster - Set your Google\nApplication Default Credentials.\nhttps://developers.google.com/identity/protocols/application-default-\ncredentials\npositional arguments:\nproject_id Your Cloud Platform project ID.\ninstance_id ID of the Cloud Bigtable instance to connect to.\noptional arguments:\n-h, --help show this help message and exit\n--table TABLE Table to create and destroy. (default: Hello-Bigtable)\n",
+ "override_path": "hello_happybase"
+ },
+ {
+ "name": "cbt Command Demonstration",
+ "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt",
+ "file": "instanceadmin.py",
+ "runnable": true,
+    "custom_content": "usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id\nDemonstrates how to connect to Cloud Bigtable and run some basic operations.\nPrerequisites: - Create a Cloud Bigtable cluster.\nhttps://cloud.google.com/bigtable/docs/creating-cluster - Set your Google\nApplication Default Credentials.\nhttps://developers.google.com/identity/protocols/application-default-\ncredentials\npositional arguments:\nproject_id Your Cloud Platform project ID.\ninstance_id ID of the Cloud Bigtable instance to connect to.\noptional arguments:\n-h, --help show this help message and exit\n--table TABLE Table to create and destroy. (default: Hello-Bigtable)\n",
+ "override_path": "instanceadmin"
+ },
+ {
+ "name": "Metric Scaler",
+ "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.",
+ "file": "metricscaler.py",
+ "runnable": true,
+    "custom_content": "usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] [--low_cpu_threshold LOW_CPU_THRESHOLD] [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] bigtable_instance bigtable_cluster\nusage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]\n[--low_cpu_threshold LOW_CPU_THRESHOLD]\n[--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]\nbigtable_instance bigtable_cluster\nScales Cloud Bigtable clusters based on CPU usage.\npositional arguments:\nbigtable_instance ID of the Cloud Bigtable instance to connect to.\nbigtable_cluster ID of the Cloud Bigtable cluster to connect to.\noptional arguments:\n-h, --help show this help message and exit\n--high_cpu_threshold HIGH_CPU_THRESHOLD\nIf Cloud Bigtable CPU usage is above this threshold,\nscale up\n--low_cpu_threshold LOW_CPU_THRESHOLD\nIf Cloud Bigtable CPU usage is below this threshold,\nscale down\n--short_sleep SHORT_SLEEP\nHow long to sleep in seconds between checking metrics\nafter no scale operation\n--long_sleep LONG_SLEEP\nHow long to sleep in seconds between checking metrics\nafter a scaling operation\n",
+ "override_path": "metricscaler"
+ },
+ {
+ "name": "Quickstart",
+    "description": "Demonstrates use of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
+ "file": "main.py",
+ "runnable": true,
+    "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id\npositional arguments:\nproject_id Your Cloud Platform project ID.\ninstance_id ID of the Cloud Bigtable instance to connect to.\noptional arguments:\n-h, --help show this help message and exit\n--table TABLE Existing table used in the quickstart. (default: my-table)\n",
+ "override_path": "quickstart"
+ },
+ {
+ "name": "Quickstart using HappyBase",
+    "description": "Demonstrates use of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
+ "file": "main.py",
+ "runnable": true,
+    "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id\nusage: main.py [-h] [--table TABLE] project_id instance_id\npositional arguments:\nproject_id Your Cloud Platform project ID.\ninstance_id ID of the Cloud Bigtable instance to connect to.\noptional arguments:\n-h, --help show this help message and exit\n--table TABLE Existing table used in the quickstart. (default: my-table)usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id\nDemonstrates how to connect to Cloud Bigtable and run some basic operations.\nPrerequisites: - Create a Cloud Bigtable cluster.\nhttps://cloud.google.com/bigtable/docs/creating-cluster - Set your Google\nApplication Default Credentials.\nhttps://developers.google.com/identity/protocols/application-default-\ncredentials\npositional arguments:\nproject_id Your Cloud Platform project ID.\ninstance_id ID of the Cloud Bigtable instance to connect to.\noptional arguments:\n-h, --help show this help message and exit\n--table TABLE Table to create and destroy. (default: Hello-Bigtable)\n",
+ "override_path": "tableadmin"
+ }
+ ],
+ "default_version": "v2",
+ "codeowner_team": "@googleapis/api-bigtable @googleapis/api-bigtable-partners",
+ "api_shortname": "bigtable"
+}
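A minimal sketch for sanity-checking the metadata file added above. Path and field names are taken from this diff; run from the repository root after applying it:

import json
from pathlib import Path

# Load the generator-input metadata and list the documented samples.
metadata = json.loads(Path(".librarian/generator-input/.repo-metadata.json").read_text())
print(metadata["distribution_name"], metadata["release_level"])
for sample in metadata["samples"]:
    print(f'- {sample["name"]} ({sample["override_path"]}/{sample["file"]})')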
diff --git a/.librarian/generator-input/librarian.py b/.librarian/generator-input/librarian.py
new file mode 100644
index 000000000..5b943d24b
--- /dev/null
+++ b/.librarian/generator-input/librarian.py
@@ -0,0 +1,266 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This script is used to synthesize generated parts of this library."""
+
+from pathlib import Path
+import re
+import textwrap
+from typing import List, Optional
+
+import synthtool as s
+from synthtool import gcp, _tracked_paths
+from synthtool.languages import python
+from synthtool.sources import templates
+
+common = gcp.CommonTemplates()
+
+# These flags are needed because certain post-processing operations
+# append things after a certain line of text, and can infinitely loop
+# in a Github PR. We use these flags to only do those operations
+# on fresh copies of files found in googleapis-gen, and not on user-submitted
+# changes.
+is_fresh_admin_copy = False
+is_fresh_admin_v2_copy = False
+is_fresh_admin_docs_copy = False
+
+for library in s.get_staging_dirs("v2"):
+ s.move(library / "google/cloud/bigtable_v2")
+ is_fresh_admin_copy = \
+ s.move(library / "google/cloud/bigtable_admin")
+ is_fresh_admin_v2_copy = \
+ s.move(library / "google/cloud/bigtable_admin_v2")
+ s.move(library / "tests")
+ s.move(library / "samples")
+ s.move(library / "scripts")
+ is_fresh_admin_docs_copy = \
+ s.move(library / "docs/bigtable_admin_v2", destination="docs/admin_client")
+
+s.remove_staging_dirs()
+
+# ----------------------------------------------------------------------------
+# Add templated files
+# ----------------------------------------------------------------------------
+templated_files = common.py_library(
+ samples=True, # set to True only if there are samples
+ split_system_tests=True,
+ microgenerator=True,
+ cov_level=99,
+ system_test_external_dependencies=[
+ "pytest-asyncio==0.21.2",
+ ],
+ system_test_python_versions=["3.9"],
+ unit_test_python_versions=["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "3.14"],
+ default_python_version="3.13",
+)
+
+s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/**", ".kokoro/**", "noxfile.py", "renovate.json"])
+
+
+s.shell.run(["nox", "-s", "blacken"], hide_output=False)
+
+# ----------------------------------------------------------------------------
+# Always supply app_profile_id in routing headers: https://github.com/googleapis/python-bigtable/pull/1109
+# TODO: remove after backend no longer requires empty strings
+# ----------------------------------------------------------------------------
+for file in ["async_client.py", "client.py"]:
+ s.replace(
+ f"google/cloud/bigtable_v2/services/bigtable/{file}",
+ "if request.app_profile_id:",
+ "if True: # always attach app_profile_id, even if empty string"
+ )
+# fix tests
+s.replace(
+ "tests/unit/gapic/bigtable_v2/test_bigtable.py",
+ 'assert \(\n\s*gapic_v1\.routing_header\.to_grpc_metadata\(expected_headers\) in kw\["metadata"\]\n.*',
+ """# assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])"""
+)
+s.replace(
+ "tests/unit/gapic/bigtable_v2/test_bigtable.py",
+ 'expected_headers = {"name": "projects/sample1/instances/sample2"}',
+ """expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }"""
+)
+s.replace(
+ "tests/unit/gapic/bigtable_v2/test_bigtable.py",
+ """
+ expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3"
+ }
+""",
+ """
+ expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
+ }
+"""
+)
+
+# ----------------------------------------------------------------------------
+# Samples templates
+# ----------------------------------------------------------------------------
+
+python.py_samples(skip_readmes=True)
+
+# --------------------------------------------------------------------------
+# Admin Overlay work
+# --------------------------------------------------------------------------
+
+# Add overlay imports to top level __init__.py files in admin_v2 and admin at the end
+# of each file, after the __all__ definition. These changes should only be done on fresh
+# copies of the __init__.py files.
+def add_overlay_to_init_py(init_py_location, import_statements, should_add):
+ if should_add:
+ s.replace(
+ init_py_location,
+ r"(?s)(^__all__ = \(.*\)$)",
+ r"\1\n\n" + import_statements
+ )
+
+add_overlay_to_init_py(
+ "google/cloud/bigtable_admin_v2/__init__.py",
+ """from .overlay import * # noqa: F403\n
+__all__ += overlay.__all__ # noqa: F405""",
+ is_fresh_admin_v2_copy,
+)
+
+add_overlay_to_init_py(
+ "google/cloud/bigtable_admin/__init__.py",
+ """import google.cloud.bigtable_admin_v2.overlay # noqa: F401
+from google.cloud.bigtable_admin_v2.overlay import * # noqa: F401, F403
+
+__all__ += google.cloud.bigtable_admin_v2.overlay.__all__""",
+ is_fresh_admin_copy,
+)
+
+# Replace all instances of BaseBigtableTableAdminClient/BaseBigtableAdminAsyncClient
+# in samples and docstrings with BigtableTableAdminClient/BigtableTableAdminAsyncClient
+s.replace(
+ [
+ "google/cloud/bigtable_admin_v2/services/*/client.py",
+ "google/cloud/bigtable_admin_v2/services/*/async_client.py",
+ "samples/generated_samples/bigtableadmin_v2_*.py"
+ ],
+ r"client = bigtable_admin_v2\.Base(BigtableTableAdmin(Async)?Client\(\))",
+ r"client = bigtable_admin_v2.\1"
+)
+
+# Fix an improperly formatted table that breaks nox -s docs.
+s.replace(
+ "google/cloud/bigtable_admin_v2/types/table.py",
+ """ For example, if \\\\_key =
+ "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" with the following
+ schema: \\{ fields \\{ field_name: "id" type \\{ string \\{
+ encoding: utf8_bytes \\{\\} \\} \\} \\} fields \\{ field_name: "date"
+ type \\{ string \\{ encoding: utf8_bytes \\{\\} \\} \\} \\} fields \\{
+ field_name: "product_code" type \\{ int64 \\{ encoding:
+ big_endian_bytes \\{\\} \\} \\} \\} encoding \\{ delimited_bytes \\{
+ delimiter: "#" \\} \\} \\}
+
+ \\| The decoded key parts would be: id = "some_id", date =
+ "2024-04-30", product_code = 1245427 The query "SELECT
+ \\\\_key, product_code FROM table" will return two columns:
+ /------------------------------------------------------
+ \\| \\\\\\| \\\\_key \\\\\\| product_code \\\\\\| \\\\\\|
+ --------------------------------------\\|--------------\\\\\\| \\\\\\|
+ "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" \\\\\\| 1245427 \\\\\\|
+ ------------------------------------------------------/
+""",
+ textwrap.indent(
+ """For example, if \\\\_key =
+"some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" with the following
+schema:
+
+.. code-block::
+
+ {
+ fields {
+ field_name: "id"
+ type { string { encoding: utf8_bytes {} } }
+ }
+ fields {
+ field_name: "date"
+ type { string { encoding: utf8_bytes {} } }
+ }
+ fields {
+ field_name: "product_code"
+ type { int64 { encoding: big_endian_bytes {} } }
+ }
+ encoding { delimited_bytes { delimiter: "#" } }
+ }
+
+The decoded key parts would be:
+id = "some_id", date = "2024-04-30", product_code = 1245427
+The query "SELECT \\\\_key, product_code FROM table" will return
+two columns:
+
++========================================+==============+
+| \\\\_key | product_code |
++========================================+==============+
+| "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" | 1245427 |
++----------------------------------------+--------------+
+""",
+ " " * 12,
+ ),
+)
+
+# These changes should only be done on fresh copies of the .rst files
+# from googleapis-gen.
+if is_fresh_admin_docs_copy:
+ # Change the subpackage for clients with overridden internal methods in them
+ # from service to overlay.service.
+ s.replace(
+ "docs/admin_client/bigtable_table_admin.rst",
+ r"^\.\. automodule:: google\.cloud\.bigtable_admin_v2\.services\.bigtable_table_admin$",
+ ".. automodule:: google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin"
+ )
+
+ # Add overlay types to types documentation
+ s.replace(
+ "docs/admin_client/types_.rst",
+ r"""(\.\. automodule:: google\.cloud\.bigtable_admin_v2\.types
+ :members:
+ :show-inheritance:)
+""",
+ r"""\1
+
+.. automodule:: google.cloud.bigtable_admin_v2.overlay.types
+ :members:
+ :show-inheritance:
+"""
+ )
+
+# These changes should only be done on a fresh copy of table.py
+# from googleapis-gen.
+if is_fresh_admin_v2_copy:
+ # Add the oneof_message import into table.py for GcRule
+ s.replace(
+ "google/cloud/bigtable_admin_v2/types/table.py",
+ r"^(from google\.cloud\.bigtable_admin_v2\.types import .+)$",
+ r"""\1
+from google.cloud.bigtable_admin_v2.utils import oneof_message""",
+ )
+
+ # Re-subclass GcRule in table.py
+ s.replace(
+ "google/cloud/bigtable_admin_v2/types/table.py",
+ r"class GcRule\(proto\.Message\)\:",
+ "class GcRule(oneof_message.OneofMessage):",
+ )
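add_overlay_to_init_py() above appends the overlay imports after the generated __all__ block via s.replace. A standalone illustration of the same regex using re.sub, with a made-up __init__.py body:

import re

# Hypothetical generated __init__.py content, for demonstration only.
init_py = '''from .types import Table

__all__ = (
    "Table",
)
'''

import_statements = (
    "from .overlay import *  # noqa: F403\n\n"
    "__all__ += overlay.__all__  # noqa: F405"
)

patched = re.sub(
    r"(?s)(^__all__ = \(.*\)$)",  # same pattern librarian.py passes to s.replace
    lambda m: m.group(1) + "\n\n" + import_statements,
    init_py,
    flags=re.MULTILINE,
)
print(patched)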
diff --git a/.librarian/generator-input/noxfile.py b/.librarian/generator-input/noxfile.py
new file mode 100644
index 000000000..d1176966e
--- /dev/null
+++ b/.librarian/generator-input/noxfile.py
@@ -0,0 +1,569 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+
+from __future__ import absolute_import
+
+import os
+import pathlib
+import re
+import shutil
+from typing import Dict, List
+import warnings
+
+import nox
+
+FLAKE8_VERSION = "flake8==6.1.0"
+BLACK_VERSION = "black[jupyter]==23.3.0"
+ISORT_VERSION = "isort==5.11.0"
+LINT_PATHS = ["google", "tests", "noxfile.py", "setup.py"]
+
+DEFAULT_PYTHON_VERSION = "3.13"
+
+UNIT_TEST_PYTHON_VERSIONS: List[str] = [
+ "3.7",
+ "3.8",
+ "3.9",
+ "3.10",
+ "3.11",
+ "3.12",
+ "3.13",
+ "3.14",
+]
+UNIT_TEST_STANDARD_DEPENDENCIES = [
+ "mock",
+ "asyncmock",
+ "pytest",
+ "pytest-cov",
+ "pytest-asyncio",
+ BLACK_VERSION,
+ "autoflake",
+]
+UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = []
+UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = []
+UNIT_TEST_DEPENDENCIES: List[str] = []
+UNIT_TEST_EXTRAS: List[str] = []
+UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {}
+
+SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.10", "3.14"]
+SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [
+ "mock",
+ "pytest",
+ "google-cloud-testutils",
+]
+SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [
+ "pytest-asyncio==0.21.2",
+ BLACK_VERSION,
+ "pyyaml==6.0.2",
+]
+SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = []
+SYSTEM_TEST_DEPENDENCIES: List[str] = []
+SYSTEM_TEST_EXTRAS: List[str] = []
+SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {}
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
+nox.options.sessions = [
+ "unit-3.10",
+ "unit-3.11",
+ "unit-3.12",
+ "unit-3.13",
+ "unit-3.14",
+ "system_emulated",
+ "system",
+ "mypy",
+ "cover",
+ "lint",
+ "lint_setup_py",
+ "blacken",
+ "docs",
+ "format",
+]
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def lint(session):
+ """Run linters.
+
+ Returns a failure if the linters find linting errors or sufficiently
+ serious code quality issues.
+ """
+ session.install(FLAKE8_VERSION, BLACK_VERSION)
+ session.run(
+ "black",
+ "--check",
+ *LINT_PATHS,
+ )
+ session.run("flake8", "google", "tests")
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def blacken(session):
+ """Run black. Format code to uniform standard."""
+ session.install(BLACK_VERSION)
+ session.run(
+ "black",
+ *LINT_PATHS,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def format(session):
+ """
+ Run isort to sort imports. Then run black
+ to format code to uniform standard.
+ """
+ session.install(BLACK_VERSION, ISORT_VERSION)
+ # Use the --fss option to sort imports using strict alphabetical order.
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
+ session.run(
+ "isort",
+ "--fss",
+ *LINT_PATHS,
+ )
+ session.run(
+ "black",
+ *LINT_PATHS,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def mypy(session):
+ """Verify type hints are mypy compatible."""
+ session.install("-e", ".")
+ session.install(
+ "mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests"
+ )
+ session.install("google-cloud-testutils")
+ session.run("mypy", "-p", "google.cloud.bigtable.data")
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def lint_setup_py(session):
+ """Verify that setup.py is valid (including RST check)."""
+ session.install("setuptools", "docutils", "pygments")
+ session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
+
+
+def install_unittest_dependencies(session, *constraints):
+ standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES
+ session.install(*standard_deps, *constraints)
+
+ if UNIT_TEST_EXTERNAL_DEPENDENCIES:
+ warnings.warn(
+ "'unit_test_external_dependencies' is deprecated. Instead, please "
+ "use 'unit_test_dependencies' or 'unit_test_local_dependencies'.",
+ DeprecationWarning,
+ )
+ session.install(*UNIT_TEST_EXTERNAL_DEPENDENCIES, *constraints)
+
+ if UNIT_TEST_LOCAL_DEPENDENCIES:
+ session.install(*UNIT_TEST_LOCAL_DEPENDENCIES, *constraints)
+
+ if UNIT_TEST_EXTRAS_BY_PYTHON:
+ extras = UNIT_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
+ elif UNIT_TEST_EXTRAS:
+ extras = UNIT_TEST_EXTRAS
+ else:
+ extras = []
+
+ if extras:
+ session.install("-e", f".[{','.join(extras)}]", *constraints)
+ else:
+ session.install("-e", ".", *constraints)
+
+
+@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
+@nox.parametrize(
+ "protobuf_implementation",
+ ["python", "upb", "cpp"],
+)
+def unit(session, protobuf_implementation):
+ # Install all test dependencies, then install this package in-place.
+ py_version = tuple([int(v) for v in session.python.split(".")])
+ if protobuf_implementation == "cpp" and py_version >= (3, 11):
+ session.skip("cpp implementation is not supported in python 3.11+")
+
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ install_unittest_dependencies(session, "-c", constraints_path)
+
+ # TODO(https://github.com/googleapis/synthtool/issues/1976):
+ # Remove the 'cpp' implementation once support for Protobuf 3.x is dropped.
+ # The 'cpp' implementation requires Protobuf<4.
+ if protobuf_implementation == "cpp":
+ session.install("protobuf<4")
+
+ # Run py.test against the unit tests.
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=unit_{session.python}_sponge_log.xml",
+ "--cov=google",
+ "--cov=tests/unit",
+ "--cov-append",
+ "--cov-config=.coveragerc",
+ "--cov-report=",
+ "--cov-fail-under=0",
+ os.path.join("tests", "unit"),
+ *session.posargs,
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+
+
+def install_systemtest_dependencies(session, *constraints):
+ # Use pre-release gRPC for system tests.
+ # Exclude version 1.52.0rc1 which has a known issue.
+ # See https://github.com/grpc/grpc/issues/32163
+ session.install("--pre", "grpcio!=1.52.0rc1")
+
+ session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_EXTERNAL_DEPENDENCIES:
+ session.install(*SYSTEM_TEST_EXTERNAL_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_LOCAL_DEPENDENCIES:
+ session.install("-e", *SYSTEM_TEST_LOCAL_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_DEPENDENCIES:
+ session.install("-e", *SYSTEM_TEST_DEPENDENCIES, *constraints)
+
+ if SYSTEM_TEST_EXTRAS_BY_PYTHON:
+ extras = SYSTEM_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
+ elif SYSTEM_TEST_EXTRAS:
+ extras = SYSTEM_TEST_EXTRAS
+ else:
+ extras = []
+
+ if extras:
+ session.install("-e", f".[{','.join(extras)}]", *constraints)
+ else:
+ session.install("-e", ".", *constraints)
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def system_emulated(session):
+ import subprocess
+ import signal
+
+ try:
+ subprocess.call(["gcloud", "--version"])
+ except OSError:
+ session.skip("gcloud not found but required for emulator support")
+
+ # Currently, CI/CD doesn't have beta component of gcloud.
+ subprocess.call(["gcloud", "components", "install", "beta", "bigtable"])
+
+ hostport = "localhost:8789"
+ session.env["BIGTABLE_EMULATOR_HOST"] = hostport
+
+ p = subprocess.Popen(
+ ["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport]
+ )
+
+ try:
+ system(session)
+ finally:
+ # Stop Emulator
+ os.killpg(os.getpgid(p.pid), signal.SIGKILL)
+
+
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+@nox.parametrize("client_type", ["async", "sync", "legacy"])
+def conformance(session, client_type):
+ # install dependencies
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ install_unittest_dependencies(session, "-c", constraints_path)
+ with session.chdir("test_proxy"):
+ # download the conformance test suite
+ session.run(
+ "bash",
+ "-e",
+ "run_tests.sh",
+ external=True,
+ env={"CLIENT_TYPE": client_type},
+ )
+
+
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+def system(session):
+ """Run the system test suite."""
+ constraints_path = str(
+ CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+ )
+ system_test_path = os.path.join("tests", "system.py")
+ system_test_folder_path = os.path.join("tests", "system")
+
+ # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
+ if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
+ session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
+ # Install pyopenssl for mTLS testing.
+ if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
+ session.install("pyopenssl")
+
+ system_test_exists = os.path.exists(system_test_path)
+ system_test_folder_exists = os.path.exists(system_test_folder_path)
+ # Sanity check: only run tests if found.
+ if not system_test_exists and not system_test_folder_exists:
+ session.skip("System tests were not found")
+
+ install_systemtest_dependencies(session, "-c", constraints_path)
+
+ # Run py.test against the system tests.
+ if system_test_exists:
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_path,
+ *session.posargs,
+ )
+ if system_test_folder_exists:
+ session.run(
+ "py.test",
+ "--quiet",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_folder_path,
+ *session.posargs,
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def cover(session):
+ """Run the final coverage report.
+
+ This outputs the coverage report aggregating coverage from the unit
+ test runs (not system test runs), and then erases coverage data.
+ """
+ session.install("coverage", "pytest-cov")
+ session.run("coverage", "report", "--show-missing", "--fail-under=99")
+
+ session.run("coverage", "erase")
+
+
+@nox.session(python="3.10")
+def docs(session):
+ """Build the docs for this library."""
+
+ session.install("-e", ".")
+ session.install(
+ # We need to pin to specific versions of the `sphinxcontrib-*` packages
+ # which still support sphinx 4.x.
+ # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344
+ # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345.
+ "sphinxcontrib-applehelp==1.0.4",
+ "sphinxcontrib-devhelp==1.0.2",
+ "sphinxcontrib-htmlhelp==2.0.1",
+ "sphinxcontrib-qthelp==1.0.3",
+ "sphinxcontrib-serializinghtml==1.1.5",
+ "sphinx==4.5.0",
+ "alabaster",
+ "recommonmark",
+ )
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-W", # warnings as errors
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
+
+
+@nox.session(python="3.10")
+def docfx(session):
+ """Build the docfx yaml files for this library."""
+
+ session.install("-e", ".")
+ session.install(
+ # We need to pin to specific versions of the `sphinxcontrib-*` packages
+ # which still support sphinx 4.x.
+ # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344
+ # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345.
+ "sphinxcontrib-applehelp==1.0.4",
+ "sphinxcontrib-devhelp==1.0.2",
+ "sphinxcontrib-htmlhelp==2.0.1",
+ "sphinxcontrib-qthelp==1.0.3",
+ "sphinxcontrib-serializinghtml==1.1.5",
+ "gcp-sphinx-docfx-yaml",
+ "alabaster",
+ "recommonmark",
+ )
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-D",
+ (
+ "extensions=sphinx.ext.autodoc,"
+ "sphinx.ext.autosummary,"
+ "docfx_yaml.extension,"
+ "sphinx.ext.intersphinx,"
+ "sphinx.ext.coverage,"
+ "sphinx.ext.napoleon,"
+ "sphinx.ext.todo,"
+ "sphinx.ext.viewcode,"
+ "recommonmark"
+ ),
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
+ # Customization: Add extra sections to the table of contents for the Classic vs Async clients
+ session.install("pyyaml")
+ session.run("python", "docs/scripts/patch_devsite_toc.py")
+
+
+@nox.session(python="3.14")
+@nox.parametrize(
+ "protobuf_implementation",
+ ["python", "upb", "cpp"],
+)
+def prerelease_deps(session, protobuf_implementation):
+ """Run all tests with prerelease versions of dependencies installed."""
+
+ py_version = tuple([int(v) for v in session.python.split(".")])
+ if protobuf_implementation == "cpp" and py_version >= (3, 11):
+ session.skip("cpp implementation is not supported in python 3.11+")
+
+ # Install all dependencies
+ session.install("-e", ".[all, tests, tracing]")
+ unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES
+ session.install(*unit_deps_all)
+ system_deps_all = (
+ SYSTEM_TEST_STANDARD_DEPENDENCIES + SYSTEM_TEST_EXTERNAL_DEPENDENCIES
+ )
+ session.install(*system_deps_all)
+
+ # Because we test minimum dependency versions on the minimum Python
+ # version, the first version we test with in the unit tests sessions has a
+ # constraints file containing all dependencies and extras.
+ with open(
+ CURRENT_DIRECTORY
+ / "testing"
+ / f"constraints-{UNIT_TEST_PYTHON_VERSIONS[0]}.txt",
+ encoding="utf-8",
+ ) as constraints_file:
+ constraints_text = constraints_file.read()
+
+ # Ignore leading whitespace and comment lines.
+ constraints_deps = [
+ match.group(1)
+ for match in re.finditer(
+ r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE
+ )
+ ]
+
+ session.install(*constraints_deps)
+
+ prerel_deps = [
+ "protobuf",
+ # dependency of grpc
+ "six",
+ "grpc-google-iam-v1",
+ "googleapis-common-protos",
+ "grpcio",
+ "grpcio-status",
+ "google-api-core",
+ "google-auth",
+ "proto-plus",
+ "google-cloud-testutils",
+        # dependencies of google-cloud-testutils
+ "click",
+ ]
+
+ for dep in prerel_deps:
+ session.install("--pre", "--no-deps", "--upgrade", dep)
+
+ # Remaining dependencies
+ other_deps = [
+ "requests",
+ "cryptography",
+ ]
+ session.install(*other_deps)
+
+ # Print out prerelease package versions
+ session.run(
+ "python", "-c", "import google.protobuf; print(google.protobuf.__version__)"
+ )
+ session.run("python", "-c", "import grpc; print(grpc.__version__)")
+ session.run("python", "-c", "import google.auth; print(google.auth.__version__)")
+
+ session.run(
+ "py.test",
+ "tests/unit",
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+
+ system_test_path = os.path.join("tests", "system.py")
+ system_test_folder_path = os.path.join("tests", "system")
+
+ # Only run system tests if found.
+ if os.path.exists(system_test_path):
+ session.run(
+ "py.test",
+ "--verbose",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_path,
+ *session.posargs,
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+ if os.path.exists(system_test_folder_path):
+ session.run(
+ "py.test",
+ "--verbose",
+ f"--junitxml=system_{session.python}_sponge_log.xml",
+ system_test_folder_path,
+ *session.posargs,
+ env={
+ "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation,
+ },
+ )
+
+
+@nox.session(python="3.10")
+def generate_sync(session):
+ """
+ Re-generate sync files for the library from CrossSync-annotated async source
+ """
+ session.install(BLACK_VERSION)
+ session.install("autoflake")
+ session.run("python", ".cross_sync/generate.py", ".")
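prerelease_deps() above extracts every pinned package name from the constraints file with the regex ^\s*(\S+)(?===\S+). A small self-contained check of that extraction against an inline sample instead of testing/constraints-3.7.txt:

import re

constraints_text = """\
# comments and unpinned lines are ignored
google-api-core==2.17.0
google-cloud-core==1.4.4
grpc-google-iam-v1
  proto-plus==1.22.3
"""

# Same pattern and flags used in the noxfile session above.
constraints_deps = [
    match.group(1)
    for match in re.finditer(r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE)
]
print(constraints_deps)  # ['google-api-core', 'google-cloud-core', 'proto-plus']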
diff --git a/.librarian/generator-input/setup.py b/.librarian/generator-input/setup.py
new file mode 100644
index 000000000..fd8062970
--- /dev/null
+++ b/.librarian/generator-input/setup.py
@@ -0,0 +1,100 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import os
+
+import setuptools
+
+
+package_root = os.path.abspath(os.path.dirname(__file__))
+
+# Package metadata.
+
+name = "google-cloud-bigtable"
+description = "Google Cloud Bigtable API client library"
+
+version = {}
+with open(os.path.join(package_root, "google/cloud/bigtable/gapic_version.py")) as fp:
+ exec(fp.read(), version)
+version = version["__version__"]
+
+
+# Should be one of:
+# 'Development Status :: 3 - Alpha'
+# 'Development Status :: 4 - Beta'
+# 'Development Status :: 5 - Production/Stable'
+release_status = "Development Status :: 5 - Production/Stable"
+dependencies = [
+ "google-api-core[grpc] >= 2.17.0, <3.0.0",
+ "google-cloud-core >= 1.4.4, <3.0.0",
+ "google-auth >= 2.23.0, <3.0.0,!=2.24.0,!=2.25.0",
+ "grpc-google-iam-v1 >= 0.12.4, <1.0.0",
+ "proto-plus >= 1.22.3, <2.0.0",
+ "proto-plus >= 1.25.0, <2.0.0; python_version>='3.13'",
+ "protobuf>=3.20.2,<7.0.0,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5",
+ "google-crc32c>=1.5.0, <2.0.0dev",
+]
+extras = {"libcst": "libcst >= 0.2.5"}
+
+
+# Setup boilerplate below this line.
+
+package_root = os.path.abspath(os.path.dirname(__file__))
+
+readme_filename = os.path.join(package_root, "README.rst")
+with io.open(readme_filename, encoding="utf-8") as readme_file:
+ readme = readme_file.read()
+
+# Only include packages under the 'google' namespace. Do not include tests,
+# benchmarks, etc.
+packages = [
+ package
+ for package in setuptools.find_namespace_packages()
+ if package.startswith("google")
+]
+
+setuptools.setup(
+ name=name,
+ version=version,
+ description=description,
+ long_description=readme,
+ author="Google LLC",
+ author_email="googleapis-packages@google.com",
+ license="Apache 2.0",
+ url="https://github.com/googleapis/python-bigtable",
+ classifiers=[
+ release_status,
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Apache Software License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
+ "Operating System :: OS Independent",
+ "Topic :: Internet",
+ ],
+ platforms="Posix; MacOS X; Windows",
+ packages=packages,
+ install_requires=dependencies,
+ extras_require=extras,
+ python_requires=">=3.7",
+ include_package_data=True,
+ zip_safe=False,
+)
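setup.py above reads the package version by exec-ing gapic_version.py into a dict. The same pattern against an in-memory string, with the version number taken from this release:

# Hedged sketch of the version-loading idiom, without touching
# google/cloud/bigtable/gapic_version.py on disk.
gapic_version_py = '__version__ = "2.35.0"\n'

version = {}
exec(gapic_version_py, version)
print(version["__version__"])  # 2.35.0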
diff --git a/.librarian/state.yaml b/.librarian/state.yaml
new file mode 100644
index 000000000..71d0e465d
--- /dev/null
+++ b/.librarian/state.yaml
@@ -0,0 +1,40 @@
+image: us-central1-docker.pkg.dev/cloud-sdk-librarian-prod/images-prod/python-librarian-generator@sha256:b8058df4c45e9a6e07f6b4d65b458d0d059241dd34c814f151c8bf6b89211209
+libraries:
+ - id: google-cloud-bigtable
+ version: 2.35.0
+ last_generated_commit: 9637e50bc0ff6a5e8944980aaf6a2b7f34a90910
+ apis:
+ - path: google/bigtable/v2
+ service_config: bigtable_v2.yaml
+ - path: google/bigtable/admin/v2
+ service_config: bigtableadmin_v2.yaml
+ source_roots:
+ - .
+ preserve_regex: []
+ remove_regex:
+ - ^.pre-commit-config.yaml
+ - ^.repo-metadata.json
+ - ^.trampolinerc
+ - ^docs/admin_client/bigtable
+ - ^docs/admin_client/services_.rst
+ - ^docs/admin_client/types_.rst
+ - ^docs/summary_overview.md
+ - ^google/cloud/bigtable_v2
+ - ^google/cloud/bigtable_admin/
+ - ^google/cloud/bigtable_admin_v2/services
+ - ^google/cloud/bigtable_admin_v2/types
+ - ^google/cloud/bigtable_admin_v2/__init__.py
+ - ^google/cloud/bigtable_admin_v2/gapic
+ - ^google/cloud/bigtable_admin_v2/py.typed
+ - ^samples/AUTHORING_GUIDE.md
+ - ^samples/CONTRIBUTING.md
+ - ^samples/generated_samples
+ - ^tests/unit/gapic
+ - ^noxfile.py
+ - ^scripts/fixup_bigtable
+ - ^setup.py
+ - ^SECURITY.md
+ - ^tests/__init__.py
+ - ^tests/unit/__init__.py
+ - ^tests/unit/gapic
+ tag_format: v{version}
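A hedged sketch (remove_regex semantics assumed: entries are path regexes for files librarian regenerates or drops) that loads the state file above and tests a few repo-relative paths:

import re
import yaml  # pip install pyyaml

state = yaml.safe_load(open(".librarian/state.yaml"))
patterns = [re.compile(p) for p in state["libraries"][0]["remove_regex"]]

for path in ["noxfile.py", "google/cloud/bigtable_v2/types.py", "README.rst"]:
    matched = any(p.search(path) for p in patterns)
    print(path, "matches remove_regex" if matched else "kept")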
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
deleted file mode 100644
index 90999b775..000000000
--- a/.release-please-manifest.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- ".": "2.31.0"
-}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9a0b2e013..cbb707694 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,57 @@
[1]: https://pypi.org/project/google-cloud-bigtable/#history
+## [2.35.0](https://github.com/googleapis/python-bigtable/compare/v2.34.0...v2.35.0) (2025-12-16)
+
+
+### Features
+
+* support mTLS certificates when available (#1249) ([ca20219cf45305de25dfb715f69dd63bce9981b7](https://github.com/googleapis/python-bigtable/commit/ca20219cf45305de25dfb715f69dd63bce9981b7))
+* add basic interceptor to client (#1206) ([6561cfac605ba7c5b3f750c3bdca9108e517ba77](https://github.com/googleapis/python-bigtable/commit/6561cfac605ba7c5b3f750c3bdca9108e517ba77))
+* add PeerInfo proto in Bigtable API ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350))
+* Add Type API updates needed to support structured keys in materialized views ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350))
+* Add encodings for STRUCT and the Timestamp type ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350))
+
+
+### Bug Fixes
+
+* async client uses fixed grace period (#1236) ([544db1cd7af876298b8637f495b6c7b2a0bcf16c](https://github.com/googleapis/python-bigtable/commit/544db1cd7af876298b8637f495b6c7b2a0bcf16c))
+* re-export AddToCell for consistency (#1241) ([2a5baf11d30dc383a7b48d5f43b6cbb6160782e3](https://github.com/googleapis/python-bigtable/commit/2a5baf11d30dc383a7b48d5f43b6cbb6160782e3))
+* retry cancelled errors (#1235) ([e3fd5d8668303db4ed35e9bf6be48b46954f9d67](https://github.com/googleapis/python-bigtable/commit/e3fd5d8668303db4ed35e9bf6be48b46954f9d67))
+* Add ReadRows/SampleRowKeys bindings for materialized views ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350))
+* Deprecate credentials_file argument ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350))
+
+## [2.34.0](https://github.com/googleapis/python-bigtable/compare/v2.33.0...v2.34.0) (2025-10-16)
+
+
+### Features
+
+* Add support for Python 3.14 ([#1217](https://github.com/googleapis/python-bigtable/issues/1217)) ([263332a](https://github.com/googleapis/python-bigtable/commit/263332af71a229cb4fa598008a708137086a6f67))
+
+## [2.33.0](https://github.com/googleapis/python-bigtable/compare/v2.32.0...v2.33.0) (2025-10-06)
+
+
+### Features
+
+* Add support for Proto and Enum types ([#1202](https://github.com/googleapis/python-bigtable/issues/1202)) ([34ceb86](https://github.com/googleapis/python-bigtable/commit/34ceb86007db08d453fa25cca4968d5b498ffcd6))
+* Expose universe_domain for tpc ([#1150](https://github.com/googleapis/python-bigtable/issues/1150)) ([451fd97](https://github.com/googleapis/python-bigtable/commit/451fd97e435218ffed47d39423680ffc4feccac4))
+
+
+### Bug Fixes
+
+* Fix instance registration cleanup on early iterator termination ([#1216](https://github.com/googleapis/python-bigtable/issues/1216)) ([bbfd746](https://github.com/googleapis/python-bigtable/commit/bbfd746c61a6362efa42c7899ec3e34ceb541c83))
+* Refactor channel refresh ([#1174](https://github.com/googleapis/python-bigtable/issues/1174)) ([6fa3008](https://github.com/googleapis/python-bigtable/commit/6fa30084058bc34d4487d1fee5c87d7795ff167a))
+
+## [2.32.0](https://github.com/googleapis/python-bigtable/compare/v2.31.0...v2.32.0) (2025-08-01)
+
+
+### Features
+
+* Add Idempotency to Cloud Bigtable MutateRowsRequest API ([#1143](https://github.com/googleapis/python-bigtable/issues/1143)) ([c3e3eb0](https://github.com/googleapis/python-bigtable/commit/c3e3eb0e4ce44ece72b150dc5822846627074fba))
+* Add support for AddToCell in Data Client ([#1147](https://github.com/googleapis/python-bigtable/issues/1147)) ([1a5b4b5](https://github.com/googleapis/python-bigtable/commit/1a5b4b514cadae5c83d61296314285d3774992c5))
+* Implement SQL support in test proxy ([#1106](https://github.com/googleapis/python-bigtable/issues/1106)) ([7a91bbf](https://github.com/googleapis/python-bigtable/commit/7a91bbfb9df23f7e93c40b88648840342af6f16f))
+* Modernized Bigtable Admin Client featuring selective GAPIC generation ([#1177](https://github.com/googleapis/python-bigtable/issues/1177)) ([58e7d37](https://github.com/googleapis/python-bigtable/commit/58e7d3782df6b13a42af053263afc575222a6b83))
+
## [2.31.0](https://github.com/googleapis/python-bigtable/compare/v2.30.1...v2.31.0) (2025-05-22)
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 985538f48..07ac8f218 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -22,7 +22,7 @@ In order to add a feature:
documentation.
- The feature must work fully on the following CPython versions:
- 3.7, 3.8, 3.9, 3.10, 3.11, 3.12 and 3.13 on both UNIX and Windows.
+ 3.7, 3.8, 3.9, 3.10, 3.11, 3.12, 3.13 and 3.14 on both UNIX and Windows.
- The feature must not add unnecessary dependencies (where
"unnecessary" is of course subjective, but new dependencies should
@@ -72,7 +72,7 @@ We use `nox `__ to instrument our tests.
- To run a single unit test::
- $ nox -s unit-3.13 -- -k
+ $ nox -s unit-3.14 -- -k
.. note::
@@ -143,12 +143,12 @@ Running System Tests
$ nox -s system
# Run a single system test
- $ nox -s system-3.8 -- -k
+ $ nox -s system-3.9 -- -k
.. note::
- System tests are only configured to run under Python 3.8.
+ System tests are only configured to run under Python 3.9.
For expediency, we do not run them in older versions of Python 3.
This alone will not run the tests. You'll need to change some local
@@ -228,6 +228,7 @@ We support:
- `Python 3.11`_
- `Python 3.12`_
- `Python 3.13`_
+- `Python 3.14`_
.. _Python 3.7: https://docs.python.org/3.7/
.. _Python 3.8: https://docs.python.org/3.8/
@@ -236,6 +237,7 @@ We support:
.. _Python 3.11: https://docs.python.org/3.11/
.. _Python 3.12: https://docs.python.org/3.12/
.. _Python 3.13: https://docs.python.org/3.13/
+.. _Python 3.14: https://docs.python.org/3.14/
Supported versions can be found in our ``noxfile.py`` `config`_.
diff --git a/README.rst b/README.rst
index 2ecbd0185..823b52c88 100644
--- a/README.rst
+++ b/README.rst
@@ -1,3 +1,7 @@
+:**NOTE**: **This GitHub repository is archived. The repository contents and history have moved to** `google-cloud-python`_.
+
+.. _google-cloud-python: https://github.com/googleapis/google-cloud-python/tree/main/packages/google-cloud-bigtable
+
Python Client for Google Cloud Bigtable
=======================================
diff --git a/docs/admin_client/admin_client_usage.rst b/docs/admin_client/admin_client_usage.rst
new file mode 100644
index 000000000..8c6f4a5dc
--- /dev/null
+++ b/docs/admin_client/admin_client_usage.rst
@@ -0,0 +1,11 @@
+Admin Client
+============
+.. toctree::
+ :maxdepth: 2
+
+ services_
+ types_
+
+..
+ This should be the only handwritten RST file in this directory.
+ Everything else should be autogenerated.
diff --git a/docs/admin_client/bigtable_instance_admin.rst b/docs/admin_client/bigtable_instance_admin.rst
new file mode 100644
index 000000000..42f7caad7
--- /dev/null
+++ b/docs/admin_client/bigtable_instance_admin.rst
@@ -0,0 +1,10 @@
+BigtableInstanceAdmin
+---------------------------------------
+
+.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin
+ :members:
+ :inherited-members:
+
+.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/admin_client/bigtable_table_admin.rst b/docs/admin_client/bigtable_table_admin.rst
new file mode 100644
index 000000000..0fa4b276a
--- /dev/null
+++ b/docs/admin_client/bigtable_table_admin.rst
@@ -0,0 +1,10 @@
+BigtableTableAdmin
+------------------------------------
+
+.. automodule:: google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin
+ :members:
+ :inherited-members:
+
+.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/admin_client/services_.rst b/docs/admin_client/services_.rst
new file mode 100644
index 000000000..ea55c7da1
--- /dev/null
+++ b/docs/admin_client/services_.rst
@@ -0,0 +1,7 @@
+Services for Google Cloud Bigtable Admin v2 API
+===============================================
+.. toctree::
+ :maxdepth: 2
+
+ bigtable_instance_admin
+ bigtable_table_admin
diff --git a/docs/admin_client/types_.rst b/docs/admin_client/types_.rst
new file mode 100644
index 000000000..ef32b9684
--- /dev/null
+++ b/docs/admin_client/types_.rst
@@ -0,0 +1,10 @@
+Types for Google Cloud Bigtable Admin v2 API
+============================================
+
+.. automodule:: google.cloud.bigtable_admin_v2.types
+ :members:
+ :show-inheritance:
+
+.. automodule:: google.cloud.bigtable_admin_v2.overlay.types
+ :members:
+ :show-inheritance:
diff --git a/docs/classic_client/snippets.py b/docs/classic_client/snippets.py
index fa3aa3627..c6059409d 100644
--- a/docs/classic_client/snippets.py
+++ b/docs/classic_client/snippets.py
@@ -29,7 +29,7 @@
"""
-import datetime
+from datetime import datetime, timezone
import pytest
from google.api_core.exceptions import DeadlineExceeded
@@ -39,7 +39,7 @@
from test_utils.system import unique_resource_id
from test_utils.retry import RetryErrors
-from google.cloud._helpers import UTC
+
from google.cloud.bigtable import Client
from google.cloud.bigtable import enums
@@ -57,8 +57,8 @@
STORAGE_TYPE = enums.StorageType.SSD
LABEL_KEY = "python-snippet"
LABEL_STAMP = (
- datetime.datetime.utcnow()
- .replace(microsecond=0, tzinfo=UTC)
+ datetime.now(timezone.utc)
+ .replace(microsecond=0)
.strftime("%Y-%m-%dt%H-%M-%S")
)
LABELS = {LABEL_KEY: str(LABEL_STAMP)}
diff --git a/docs/classic_client/snippets_table.py b/docs/classic_client/snippets_table.py
index 893135275..1850e836b 100644
--- a/docs/classic_client/snippets_table.py
+++ b/docs/classic_client/snippets_table.py
@@ -29,7 +29,7 @@
"""
-import datetime
+from datetime import datetime, timezone
import pytest
from google.api_core.exceptions import TooManyRequests
@@ -37,7 +37,6 @@
from test_utils.system import unique_resource_id
from test_utils.retry import RetryErrors
-from google.cloud._helpers import UTC
from google.cloud.bigtable import Client
from google.cloud.bigtable import enums
from google.cloud.bigtable import column_family
@@ -54,8 +53,8 @@
STORAGE_TYPE = enums.StorageType.SSD
LABEL_KEY = "python-snippet"
LABEL_STAMP = (
- datetime.datetime.utcnow()
- .replace(microsecond=0, tzinfo=UTC)
+ datetime.now(timezone.utc)
+ .replace(microsecond=0)
.strftime("%Y-%m-%dt%H-%M-%S")
)
LABELS = {LABEL_KEY: str(LABEL_STAMP)}
@@ -179,7 +178,7 @@ def test_bigtable_write_read_drop_truncate():
value = "value_{}".format(i).encode()
row = table.row(row_key)
row.set_cell(
- COLUMN_FAMILY_ID, col_name, value, timestamp=datetime.datetime.utcnow()
+ COLUMN_FAMILY_ID, col_name, value, timestamp=datetime.now(timezone.utc)
)
rows.append(row)
response = table.mutate_rows(rows)
@@ -270,7 +269,7 @@ def test_bigtable_mutations_batcher():
row_key = row_keys[0]
row = table.row(row_key)
row.set_cell(
- COLUMN_FAMILY_ID, column_name, "value-0", timestamp=datetime.datetime.utcnow()
+ COLUMN_FAMILY_ID, column_name, "value-0", timestamp=datetime.now(timezone.utc)
)
batcher.mutate(row)
# Add a collections of rows
@@ -279,7 +278,7 @@ def test_bigtable_mutations_batcher():
row = table.row(row_keys[i])
value = "value_{}".format(i).encode()
row.set_cell(
- COLUMN_FAMILY_ID, column_name, value, timestamp=datetime.datetime.utcnow()
+ COLUMN_FAMILY_ID, column_name, value, timestamp=datetime.now(timezone.utc)
)
rows.append(row)
batcher.mutate_rows(rows)
@@ -759,7 +758,7 @@ def test_bigtable_batcher_mutate_flush_mutate_rows():
row_key = b"row_key_1"
row = table.row(row_key)
row.set_cell(
- COLUMN_FAMILY_ID, COL_NAME1, "value-0", timestamp=datetime.datetime.utcnow()
+ COLUMN_FAMILY_ID, COL_NAME1, "value-0", timestamp=datetime.now(timezone.utc)
)
# In batcher, mutate will flush current batch if it
@@ -967,12 +966,12 @@ def test_bigtable_row_data_cells_cell_value_cell_values():
value = b"value_in_col1"
row = Config.TABLE.row(b"row_key_1")
row.set_cell(
- COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow()
+ COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.now(timezone.utc)
)
row.commit()
row.set_cell(
- COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow()
+ COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.now(timezone.utc)
)
row.commit()
@@ -1050,7 +1049,7 @@ def test_bigtable_row_setcell_rowkey():
cell_val = b"cell-val"
row.set_cell(
- COLUMN_FAMILY_ID, COL_NAME1, cell_val, timestamp=datetime.datetime.utcnow()
+ COLUMN_FAMILY_ID, COL_NAME1, cell_val, timestamp=datetime.now(timezone.utc)
)
# [END bigtable_api_row_set_cell]
diff --git a/docs/index.rst b/docs/index.rst
index c7f9721f3..0694c8bb0 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -9,7 +9,7 @@ Client Types
data_client/data_client_usage
classic_client/usage
-
+ admin_client/admin_client_usage
Changelog
---------
diff --git a/docs/scripts/patch_devsite_toc.py b/docs/scripts/patch_devsite_toc.py
index 5889300d2..fbb753daf 100644
--- a/docs/scripts/patch_devsite_toc.py
+++ b/docs/scripts/patch_devsite_toc.py
@@ -20,6 +20,7 @@
"""
+import glob
import yaml
import os
import shutil
@@ -153,6 +154,81 @@ def copy_markdown(self):
f"_build/html/docfx_yaml",
)
+ def validate_section(self, toc):
+ # Make sure each rst file is listed in the toc.
+ items_in_toc = [
+ d["items"] for d in toc[0]["items"] if d["name"] == self.title and ".rst"
+ ][0]
+ items_in_dir = [f for f in os.listdir(self.dir_name) if f.endswith(".rst")]
+ # subtract 1 for index
+ assert len(items_in_toc) == len(items_in_dir) - 1
+ for file in items_in_dir:
+ if file != self.index_file_name:
+ base_name, _ = os.path.splitext(file)
+ assert any(d["href"] == f"{base_name}.md" for d in items_in_toc)
+ # make sure the markdown files are present in the docfx_yaml directory
+ md_files = [d["href"] for d in items_in_toc]
+ for file in md_files:
+ assert os.path.exists(f"_build/html/docfx_yaml/{file}")
+
+
+class UIDFilteredTocSection(TocSection):
+ def __init__(self, toc_file_path, section_name, title, uid_prefix):
+ """Creates a filtered section denoted by section_name in the toc_file_path to items with the given UID prefix.
+
+ The section is then renamed to the title.
+ """
+ current_toc = yaml.safe_load(open(toc_file_path, "r"))
+ self.uid_prefix = uid_prefix
+
+ # Since we are looking for a specific section_name there should only
+ # be one match.
+ section_items = [
+ d for d in current_toc[0]["items"] if d["name"] == section_name
+ ][0]["items"]
+ filtered_items = [d for d in section_items if d["uid"].startswith(uid_prefix)]
+ self.items = filtered_items
+ self.title = title
+
+ def copy_markdown(self):
+ """
+ No-op because we are filtering on UIDs, not markdown files.
+ """
+ pass
+
+ def validate_section(self, toc):
+ uids_in_toc = set()
+
+ # A UID-filtered TOC tree looks like the following:
+ # - items:
+ # items:
+ # name:
+ # uid:
+ #
+ # Walk through the TOC tree to find all UIDs recursively.
+ def find_uids_in_items(items):
+ uids_in_toc.add(items["uid"])
+ for subitem in items.get("items", []):
+ find_uids_in_items(subitem)
+
+ items_in_toc = [d["items"] for d in toc[0]["items"] if d["name"] == self.title][
+ 0
+ ]
+ for item in items_in_toc:
+ find_uids_in_items(item)
+
+ # Now that we have all the UIDs, first match all of them
+ # with corresponding .yml files.
+ for uid in uids_in_toc:
+ assert os.path.exists(f"_build/html/docfx_yaml/{uid}.yml")
+
+ # Also validate that every uid yml file that starts with the uid_prefix
+ # exists in the section.
+ for filename in glob.glob(
+ f"{self.uid_prefix}*.yml", root_dir="_build/html/docfx_yaml"
+ ):
+ assert filename[:-4] in uids_in_toc
+
def validate_toc(toc_file_path, expected_section_list, added_sections):
current_toc = yaml.safe_load(open(toc_file_path, "r"))
@@ -164,43 +240,27 @@ def validate_toc(toc_file_path, expected_section_list, added_sections):
# make sure each customs ection is in the toc
for section in added_sections:
assert section.title in found_sections
- # make sure each rst file in each custom section dir is listed in the toc
- for section in added_sections:
- items_in_toc = [
- d["items"]
- for d in current_toc[0]["items"]
- if d["name"] == section.title and ".rst"
- ][0]
- items_in_dir = [f for f in os.listdir(section.dir_name) if f.endswith(".rst")]
- # subtract 1 for index
- assert len(items_in_toc) == len(items_in_dir) - 1
- for file in items_in_dir:
- if file != section.index_file_name:
- base_name, _ = os.path.splitext(file)
- assert any(d["href"] == f"{base_name}.md" for d in items_in_toc)
- # make sure the markdown files are present in the docfx_yaml directory
- for section in added_sections:
- items_in_toc = [
- d["items"]
- for d in current_toc[0]["items"]
- if d["name"] == section.title and ".rst"
- ][0]
- md_files = [d["href"] for d in items_in_toc]
- for file in md_files:
- assert os.path.exists(f"_build/html/docfx_yaml/{file}")
+ section.validate_section(current_toc)
print("Toc validation passed")
if __name__ == "__main__":
# Add secrtions for the async_data_client and classic_client directories
toc_path = "_build/html/docfx_yaml/toc.yml"
+
custom_sections = [
TocSection(dir_name="data_client", index_file_name="data_client_usage.rst"),
+ UIDFilteredTocSection(
+ toc_file_path=toc_path,
+ section_name="Bigtable Admin V2",
+ title="Admin Client",
+ uid_prefix="google.cloud.bigtable_admin_v2",
+ ),
TocSection(dir_name="classic_client", index_file_name="usage.rst"),
]
add_sections(toc_path, custom_sections)
# Remove the Bigtable section, since it has duplicated data
- remove_sections(toc_path, ["Bigtable"])
+ remove_sections(toc_path, ["Bigtable", "Bigtable Admin V2"])
# run validation to make sure yaml is structured as we expect
validate_toc(
toc_file_path=toc_path,
@@ -210,6 +270,7 @@ def validate_toc(toc_file_path, expected_section_list, added_sections):
"Changelog",
"Multiprocessing",
"Data Client",
+ "Admin Client",
"Classic Client",
],
added_sections=custom_sections,
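For reference, a minimal standalone sketch of the recursive UID walk that `UIDFilteredTocSection.validate_section` performs; the TOC data below is illustrative and not taken from a real docfx build::

    # Hypothetical TOC subtree, shaped like the docfx toc.yml items this patch walks.
    toc_subtree = {
        "uid": "google.cloud.bigtable_admin_v2",
        "items": [
            {"uid": "google.cloud.bigtable_admin_v2.types"},
            {
                "uid": "google.cloud.bigtable_admin_v2.services",
                "items": [
                    {"uid": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin"},
                ],
            },
        ],
    }

    uids_in_toc = set()

    def find_uids_in_items(items):
        # collect this node's uid, then recurse into any nested items
        uids_in_toc.add(items["uid"])
        for subitem in items.get("items", []):
            find_uids_in_items(subitem)

    find_uids_in_items(toc_subtree)
    assert len(uids_in_toc) == 4  # every uid under the google.cloud.bigtable_admin_v2 prefix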
diff --git a/google/cloud/bigtable/backup.py b/google/cloud/bigtable/backup.py
index 5b2cafc54..f6fa24421 100644
--- a/google/cloud/bigtable/backup.py
+++ b/google/cloud/bigtable/backup.py
@@ -17,7 +17,7 @@
import re
from google.cloud._helpers import _datetime_to_pb_timestamp # type: ignore
-from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
+from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient
from google.cloud.bigtable_admin_v2.types import table
from google.cloud.bigtable.encryption_info import EncryptionInfo
from google.cloud.bigtable.policy import Policy
@@ -106,7 +106,7 @@ def name(self):
if not self._cluster:
raise ValueError('"cluster" parameter must be set')
- return BigtableTableAdminClient.backup_path(
+ return BaseBigtableTableAdminClient.backup_path(
project=self._instance._client.project,
instance=self._instance.instance_id,
cluster=self._cluster,
@@ -141,7 +141,7 @@ def parent(self):
:returns: A full path to the parent cluster.
"""
if not self._parent and self._cluster:
- self._parent = BigtableTableAdminClient.cluster_path(
+ self._parent = BaseBigtableTableAdminClient.cluster_path(
project=self._instance._client.project,
instance=self._instance.instance_id,
cluster=self._cluster,
@@ -163,7 +163,7 @@ def source_table(self):
:returns: The Table name.
"""
if not self._source_table and self.table_id:
- self._source_table = BigtableTableAdminClient.table_path(
+ self._source_table = BaseBigtableTableAdminClient.table_path(
project=self._instance._client.project,
instance=self._instance.instance_id,
table=self.table_id,
@@ -226,7 +226,7 @@ def size_bytes(self):
def state(self):
"""The current state of this Backup.
- :rtype: :class:`~google.cloud.bigtable_admin_v2.gapic.enums.Backup.State`
+ :rtype: :class:`~google.cloud.bigtable_admin_v2.types.table.Backup.State`
:returns: The current state of this Backup.
"""
return self._state
@@ -305,8 +305,7 @@ def create(self, cluster_id=None):
created Backup.
:rtype: :class:`~google.api_core.operation.Operation`
- :returns: :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture`
- instance, to be used to poll the status of the 'create' request
+ :returns: A future to be used to poll the status of the 'create' request
:raises Conflict: if the Backup already exists
:raises NotFound: if the Instance owning the Backup does not exist
:raises BadRequest: if the `table` or `expire_time` values are invalid,
@@ -412,7 +411,7 @@ def restore(self, table_id, instance_id=None):
:param instance_id: (Optional) The ID of the Instance to restore the
backup into, if different from the current one.
- :rtype: :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture`
+ :rtype: :class:`~google.api_core.operation.Operation`
:returns: A future to be used to poll the status of the 'restore'
request.
@@ -426,14 +425,14 @@ def restore(self, table_id, instance_id=None):
"""
api = self._instance._client.table_admin_client
if instance_id:
- parent = BigtableTableAdminClient.instance_path(
+ parent = BaseBigtableTableAdminClient.instance_path(
project=self._instance._client.project,
instance=instance_id,
)
else:
parent = self._instance.name
- return api.restore_table(
+ return api._restore_table(
request={"parent": parent, "table_id": table_id, "backup": self.name}
)
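As context for the rename above, a usage sketch of the resource-path helper on the renamed class; this assumes `BaseBigtableTableAdminClient` keeps the same classmethod path helpers as the former `BigtableTableAdminClient`::

    from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient

    # Illustrative values only.
    backup_name = BaseBigtableTableAdminClient.backup_path(
        project="my-project",
        instance="my-instance",
        cluster="my-cluster",
        backup="my-backup",
    )
    # Expected shape (assumption, based on the standard GAPIC resource path):
    # "projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup"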
diff --git a/google/cloud/bigtable/client.py b/google/cloud/bigtable/client.py
index 0c89ea562..37de10b6e 100644
--- a/google/cloud/bigtable/client.py
+++ b/google/cloud/bigtable/client.py
@@ -325,11 +325,11 @@ def table_admin_client(self):
raise ValueError("Client is not an admin client.")
transport = self._create_gapic_client_channel(
- bigtable_admin_v2.BigtableTableAdminClient,
+ bigtable_admin_v2.BaseBigtableTableAdminClient,
BigtableTableAdminGrpcTransport,
)
klass = _create_gapic_client(
- bigtable_admin_v2.BigtableTableAdminClient,
+ bigtable_admin_v2.BaseBigtableTableAdminClient,
client_options=self._admin_client_options,
transport=transport,
)
diff --git a/google/cloud/bigtable/cluster.py b/google/cloud/bigtable/cluster.py
index 11fb5492d..967ec707e 100644
--- a/google/cloud/bigtable/cluster.py
+++ b/google/cloud/bigtable/cluster.py
@@ -511,9 +511,11 @@ def delete(self):
def _to_pb(self):
"""Create cluster proto buff message for API calls"""
client = self._instance._client
- location = client.instance_admin_client.common_location_path(
- client.project, self.location_id
- )
+ location = None
+ if self.location_id:
+ location = client.instance_admin_client.common_location_path(
+ client.project, self.location_id
+ )
cluster_pb = instance.Cluster(
location=location,
diff --git a/google/cloud/bigtable/data/__init__.py b/google/cloud/bigtable/data/__init__.py
index 9439f0f8d..c18eae683 100644
--- a/google/cloud/bigtable/data/__init__.py
+++ b/google/cloud/bigtable/data/__init__.py
@@ -31,6 +31,7 @@
from google.cloud.bigtable.data.mutations import Mutation
from google.cloud.bigtable.data.mutations import RowMutationEntry
+from google.cloud.bigtable.data.mutations import AddToCell
from google.cloud.bigtable.data.mutations import SetCell
from google.cloud.bigtable.data.mutations import DeleteRangeFromColumn
from google.cloud.bigtable.data.mutations import DeleteAllFromFamily
@@ -89,6 +90,7 @@
"RowRange",
"Mutation",
"RowMutationEntry",
+ "AddToCell",
"SetCell",
"DeleteRangeFromColumn",
"DeleteAllFromFamily",
diff --git a/google/cloud/bigtable/data/_async/_swappable_channel.py b/google/cloud/bigtable/data/_async/_swappable_channel.py
new file mode 100644
index 000000000..bbc9a0d47
--- /dev/null
+++ b/google/cloud/bigtable/data/_async/_swappable_channel.py
@@ -0,0 +1,139 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import Callable
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+from grpc import ChannelConnectivity
+
+if CrossSync.is_async:
+ from grpc.aio import Channel
+else:
+ from grpc import Channel
+
+__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._swappable_channel"
+
+
+@CrossSync.convert_class(sync_name="_WrappedChannel", rm_aio=True)
+class _AsyncWrappedChannel(Channel):
+ """
+ A wrapper around a gRPC channel. All methods are passed
+ through to the underlying channel.
+ """
+
+ def __init__(self, channel: Channel):
+ self._channel = channel
+
+ def unary_unary(self, *args, **kwargs):
+ return self._channel.unary_unary(*args, **kwargs)
+
+ def unary_stream(self, *args, **kwargs):
+ return self._channel.unary_stream(*args, **kwargs)
+
+ def stream_unary(self, *args, **kwargs):
+ return self._channel.stream_unary(*args, **kwargs)
+
+ def stream_stream(self, *args, **kwargs):
+ return self._channel.stream_stream(*args, **kwargs)
+
+ async def channel_ready(self):
+ return await self._channel.channel_ready()
+
+ @CrossSync.convert(
+ sync_name="__enter__", replace_symbols={"__aenter__": "__enter__"}
+ )
+ async def __aenter__(self):
+ await self._channel.__aenter__()
+ return self
+
+ @CrossSync.convert(sync_name="__exit__", replace_symbols={"__aexit__": "__exit__"})
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ return await self._channel.__aexit__(exc_type, exc_val, exc_tb)
+
+ def get_state(self, try_to_connect: bool = False) -> ChannelConnectivity:
+ return self._channel.get_state(try_to_connect=try_to_connect)
+
+ async def wait_for_state_change(self, last_observed_state):
+ return await self._channel.wait_for_state_change(last_observed_state)
+
+ def __getattr__(self, name):
+ return getattr(self._channel, name)
+
+ async def close(self, grace=None):
+ if CrossSync.is_async:
+ return await self._channel.close(grace=grace)
+ else:
+ # grace not supported by sync version
+ return self._channel.close()
+
+ if not CrossSync.is_async:
+ # add required sync methods
+
+ def subscribe(self, callback, try_to_connect=False):
+ return self._channel.subscribe(callback, try_to_connect)
+
+ def unsubscribe(self, callback):
+ return self._channel.unsubscribe(callback)
+
+
+@CrossSync.convert_class(
+ sync_name="SwappableChannel",
+ replace_symbols={"_AsyncWrappedChannel": "_WrappedChannel"},
+)
+class AsyncSwappableChannel(_AsyncWrappedChannel):
+ """
+ Provides a grpc channel wrapper, that allows the internal channel to be swapped out
+
+ Args:
+ - channel_fn: a nullary function that returns a new channel instance.
+ It should be a partial with all channel configuration arguments built-in
+ """
+
+ def __init__(self, channel_fn: Callable[[], Channel]):
+ self._channel_fn = channel_fn
+ self._channel = channel_fn()
+
+ def create_channel(self) -> Channel:
+ """
+ Create a fresh channel using the stored `channel_fn` partial
+ """
+ new_channel = self._channel_fn()
+ if CrossSync.is_async:
+ # copy over interceptors
+ # this is needed because of how gapic attaches the LoggingClientAIOInterceptor
+ # sync channels add interceptors by wrapping, so this step isn't needed
+ new_channel._unary_unary_interceptors = (
+ self._channel._unary_unary_interceptors
+ )
+ new_channel._unary_stream_interceptors = (
+ self._channel._unary_stream_interceptors
+ )
+ new_channel._stream_unary_interceptors = (
+ self._channel._stream_unary_interceptors
+ )
+ new_channel._stream_stream_interceptors = (
+ self._channel._stream_stream_interceptors
+ )
+ return new_channel
+
+ def swap_channel(self, new_channel: Channel) -> Channel:
+ """
+        Replace the wrapped channel with a new instance, typically one created using `create_channel`.
+ """
+ old_channel = self._channel
+ self._channel = new_channel
+ return old_channel
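A minimal usage sketch of the sync-generated counterpart (`SwappableChannel`, produced by CrossSync into `_sync_autogen._swappable_channel`); the emulator target below is illustrative::

    from functools import partial

    import grpc

    from google.cloud.bigtable.data._sync_autogen._swappable_channel import SwappableChannel

    # The wrapper stores the nullary factory and builds its first channel from it.
    channel_fn = partial(grpc.insecure_channel, "localhost:8086")  # hypothetical target
    wrapper = SwappableChannel(channel_fn)

    fresh = wrapper.create_channel()   # build a replacement from the stored factory
    old = wrapper.swap_channel(fresh)  # route new RPCs through the replacement
    old.close()                        # close the previous channel once drained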
diff --git a/google/cloud/bigtable/data/_async/client.py b/google/cloud/bigtable/data/_async/client.py
index 6ee21b554..f86c886f0 100644
--- a/google/cloud/bigtable/data/_async/client.py
+++ b/google/cloud/bigtable/data/_async/client.py
@@ -19,6 +19,7 @@
cast,
Any,
AsyncIterable,
+ Callable,
Optional,
Set,
Sequence,
@@ -58,6 +59,9 @@
from google.api_core.exceptions import DeadlineExceeded
from google.api_core.exceptions import ServiceUnavailable
from google.api_core.exceptions import Aborted
+from google.api_core.exceptions import Cancelled
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
import google.auth.credentials
import google.auth._default
@@ -84,6 +88,7 @@
from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter
from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter
from google.cloud.bigtable.data.row_filters import RowFilterChain
+from google.cloud.bigtable.data._metrics import BigtableClientSideMetricsController
from google.cloud.bigtable.data._cross_sync import CrossSync
@@ -92,14 +97,29 @@
from google.cloud.bigtable_v2.services.bigtable.transports import (
BigtableGrpcAsyncIOTransport as TransportType,
)
+ from google.cloud.bigtable_v2.services.bigtable import (
+ BigtableAsyncClient as GapicClient,
+ )
from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE
+ from google.cloud.bigtable.data._async._swappable_channel import (
+ AsyncSwappableChannel as SwappableChannelType,
+ )
+ from google.cloud.bigtable.data._async.metrics_interceptor import (
+ AsyncBigtableMetricsInterceptor as MetricsInterceptorType,
+ )
else:
from typing import Iterable # noqa: F401
from grpc import insecure_channel
from grpc import intercept_channel
from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport as TransportType # type: ignore
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient as GapicClient # type: ignore
from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE
-
+ from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( # noqa: F401
+ SwappableChannel as SwappableChannelType,
+ )
+ from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( # noqa: F401
+ BigtableMetricsInterceptor as MetricsInterceptorType,
+ )
if TYPE_CHECKING:
from google.cloud.bigtable.data._helpers import RowKeySamples
@@ -182,7 +202,6 @@ def __init__(
client_options = cast(
Optional[client_options_lib.ClientOptions], client_options
)
- custom_channel = None
self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
if self._emulator_host is not None:
warnings.warn(
@@ -191,11 +210,11 @@ def __init__(
stacklevel=2,
)
# use insecure channel if emulator is set
- custom_channel = insecure_channel(self._emulator_host)
if credentials is None:
credentials = google.auth.credentials.AnonymousCredentials()
if project is None:
project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+ self._metrics_interceptor = MetricsInterceptorType()
# initialize client
ClientWithProject.__init__(
self,
@@ -203,14 +222,28 @@ def __init__(
project=project,
client_options=client_options,
)
- self._gapic_client = CrossSync.GapicClient(
+ self._gapic_client = GapicClient(
credentials=credentials,
client_options=client_options,
client_info=self.client_info,
transport=lambda *args, **kwargs: TransportType(
- *args, **kwargs, channel=custom_channel
+ *args, **kwargs, channel=self._build_grpc_channel
),
)
+ if (
+ credentials
+ and credentials.universe_domain != self.universe_domain
+ and self._emulator_host is None
+ ):
+ # validate that the universe domain of the credentials matches the
+ # universe domain configured in client_options
+ raise ValueError(
+ f"The configured universe domain ({self.universe_domain}) does "
+ "not match the universe domain found in the credentials "
+ f"({self._credentials.universe_domain}). If you haven't "
+ "configured the universe domain explicitly, `googleapis.com` "
+ "is the default."
+ )
self._is_closed = CrossSync.Event()
self.transport = cast(TransportType, self._gapic_client.transport)
# keep track of active instances to for warmup on channel refresh
@@ -220,7 +253,7 @@ def __init__(
self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {}
self._channel_init_time = time.monotonic()
self._channel_refresh_task: CrossSync.Task[None] | None = None
- self._executor = (
+ self._executor: concurrent.futures.ThreadPoolExecutor | None = (
concurrent.futures.ThreadPoolExecutor() if not CrossSync.is_async else None
)
if self._emulator_host is None:
@@ -235,6 +268,64 @@ def __init__(
stacklevel=2,
)
+ def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannelType:
+ """
+ This method is called by the gapic transport to create a grpc channel.
+
+        The init arguments passed down are captured in a partial used by SwappableChannel
+        to create new channel instances in the future, as part of the channel refresh logic.
+
+        Emulators always use an insecure channel.
+
+ Args:
+ - *args: positional arguments passed by the gapic layer to create a new channel with
+ - **kwargs: keyword arguments passed by the gapic layer to create a new channel with
+ Returns:
+ a custom wrapped swappable channel
+ """
+ create_channel_fn: Callable[[], Channel]
+ if self._emulator_host is not None:
+ # Emulators use insecure channels
+ create_channel_fn = partial(insecure_channel, self._emulator_host)
+ elif CrossSync.is_async:
+ # For async client, use the default create_channel.
+ create_channel_fn = partial(TransportType.create_channel, *args, **kwargs)
+ else:
+ # For sync client, wrap create_channel with interceptors.
+ def sync_create_channel_fn():
+ return intercept_channel(
+ TransportType.create_channel(*args, **kwargs),
+ self._metrics_interceptor,
+ )
+
+ create_channel_fn = sync_create_channel_fn
+
+ # Instantiate SwappableChannelType with the determined creation function.
+ new_channel = SwappableChannelType(create_channel_fn)
+ if CrossSync.is_async:
+ # Attach async interceptors to the channel instance itself.
+ new_channel._unary_unary_interceptors.append(self._metrics_interceptor)
+ new_channel._unary_stream_interceptors.append(self._metrics_interceptor)
+ return new_channel
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used by the client instance.
+ """
+ return self._gapic_client.universe_domain
+
+ @property
+ def api_endpoint(self) -> str:
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance.
+ """
+ return self._gapic_client.api_endpoint
+
@staticmethod
def _client_version() -> str:
"""
@@ -332,6 +423,11 @@ async def _ping_and_warm_instances(
)
return [r or None for r in result_list]
+ def _invalidate_channel_stubs(self):
+ """Helper to reset the cached stubs. Needed when changing out the grpc channel"""
+ self.transport._stubs = {}
+ self.transport._prep_wrapped_messages(self.client_info)
+
@CrossSync.convert
async def _manage_channel(
self,
@@ -357,13 +453,17 @@ async def _manage_channel(
grace_period: time to allow previous channel to serve existing
requests before closing, in seconds
"""
+ if not isinstance(self.transport.grpc_channel, SwappableChannelType):
+ warnings.warn("Channel does not support auto-refresh.")
+ return
+ super_channel: SwappableChannelType = self.transport.grpc_channel
first_refresh = self._channel_init_time + random.uniform(
refresh_interval_min, refresh_interval_max
)
next_sleep = max(first_refresh - time.monotonic(), 0)
if next_sleep > 0:
# warm the current channel immediately
- await self._ping_and_warm_instances(channel=self.transport.grpc_channel)
+ await self._ping_and_warm_instances(channel=super_channel)
# continuously refresh the channel every `refresh_interval` seconds
while not self._is_closed.is_set():
await CrossSync.event_wait(
@@ -376,32 +476,18 @@ async def _manage_channel(
break
start_timestamp = time.monotonic()
# prepare new channel for use
- # TODO: refactor to avoid using internal references: https://github.com/googleapis/python-bigtable/issues/1094
- old_channel = self.transport.grpc_channel
- new_channel = self.transport.create_channel()
- if CrossSync.is_async:
- new_channel._unary_unary_interceptors.append(
- self.transport._interceptor
- )
- else:
- new_channel = intercept_channel(
- new_channel, self.transport._interceptor
- )
+ new_channel = super_channel.create_channel()
await self._ping_and_warm_instances(channel=new_channel)
# cycle channel out of use, with long grace window before closure
- self.transport._grpc_channel = new_channel
- self.transport._logged_channel = new_channel
- # invalidate caches
- self.transport._stubs = {}
- self.transport._prep_wrapped_messages(self.client_info)
+ old_channel = super_channel.swap_channel(new_channel)
+ self._invalidate_channel_stubs()
# give old_channel a chance to complete existing rpcs
- if CrossSync.is_async:
- await old_channel.close(grace_period)
- else:
- if grace_period:
- self._is_closed.wait(grace_period) # type: ignore
- old_channel.close() # type: ignore
- # subtract thed time spent waiting for the channel to be replaced
+ if grace_period:
+ await CrossSync.event_wait(
+ self._is_closed, grace_period, async_break_early=False
+ )
+ await old_channel.close()
+ # subtract the time spent waiting for the channel to be replaced
next_refresh = random.uniform(refresh_interval_min, refresh_interval_max)
next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0)
@@ -415,7 +501,8 @@ async def _manage_channel(
async def _register_instance(
self,
instance_id: str,
- owner: _DataApiTargetAsync | ExecuteQueryIteratorAsync,
+ app_profile_id: Optional[str],
+ owner_id: int,
) -> None:
"""
Registers an instance with the client, and warms the channel for the instance
@@ -425,13 +512,15 @@ async def _register_instance(
Args:
instance_id: id of the instance to register.
- owner: table that owns the instance. Owners will be tracked in
+ app_profile_id: id of the app profile calling the instance.
+ owner_id: integer id of the object owning the instance. Owners will be tracked in
_instance_owners, and instances will only be unregistered when all
- owners call _remove_instance_registration
+            owners call _remove_instance_registration. Can be obtained by calling
+            the `id` identity function, using `id(owner)`
"""
instance_name = self._gapic_client.instance_path(self.project, instance_id)
- instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id)
- self._instance_owners.setdefault(instance_key, set()).add(id(owner))
+ instance_key = _WarmedInstanceKey(instance_name, app_profile_id)
+ self._instance_owners.setdefault(instance_key, set()).add(owner_id)
if instance_key not in self._active_instances:
self._active_instances.add(instance_key)
if self._channel_refresh_task:
@@ -449,10 +538,11 @@ async def _register_instance(
"_DataApiTargetAsync": "_DataApiTarget",
}
)
- async def _remove_instance_registration(
+ def _remove_instance_registration(
self,
instance_id: str,
- owner: _DataApiTargetAsync | ExecuteQueryIteratorAsync,
+ app_profile_id: Optional[str],
+ owner_id: int,
) -> bool:
"""
Removes an instance from the client's registered instances, to prevent
@@ -462,17 +552,17 @@ async def _remove_instance_registration(
Args:
instance_id: id of the instance to remove
- owner: table that owns the instance. Owners will be tracked in
- _instance_owners, and instances will only be unregistered when all
- owners call _remove_instance_registration
+ app_profile_id: id of the app profile calling the instance.
+ owner_id: integer id of the object owning the instance. Can be
+                obtained by calling the `id` identity function, using `id(owner)`.
Returns:
bool: True if instance was removed, else False
"""
instance_name = self._gapic_client.instance_path(self.project, instance_id)
- instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id)
+ instance_key = _WarmedInstanceKey(instance_name, app_profile_id)
owner_list = self._instance_owners.get(instance_key, set())
try:
- owner_list.remove(id(owner))
+ owner_list.remove(owner_id)
if len(owner_list) == 0:
self._active_instances.remove(instance_key)
return True
@@ -625,6 +715,7 @@ async def execute_query(
DeadlineExceeded,
ServiceUnavailable,
),
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
) -> "ExecuteQueryIteratorAsync":
"""
Executes an SQL query on an instance.
@@ -673,6 +764,62 @@ async def execute_query(
If None, defaults to prepare_operation_timeout.
prepare_retryable_errors: a list of errors that will be retried if encountered during prepareQuery.
Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ column_info: (Optional) A dictionary mapping column names to Protobuf message classes or EnumTypeWrapper objects.
+ This dictionary provides the necessary type information for deserializing PROTO and
+ ENUM column values from the query results. When an entry is provided
+ for a PROTO or ENUM column, the client library will attempt to deserialize the raw data.
+
+ - For PROTO columns: The value in the dictionary should be the
+ Protobuf Message class (e.g., ``my_pb2.MyMessage``).
+ - For ENUM columns: The value should be the Protobuf EnumTypeWrapper
+ object (e.g., ``my_pb2.MyEnum``).
+
+ Example::
+
+ import my_pb2
+
+ column_info = {
+ "my_proto_column": my_pb2.MyMessage,
+ "my_enum_column": my_pb2.MyEnum
+ }
+
+ If ``column_info`` is not provided, or if a specific column name is not found
+ in the dictionary:
+
+ - PROTO columns will be returned as raw bytes.
+ - ENUM columns will be returned as integers.
+
+ Note for Nested PROTO or ENUM Fields:
+
+ To specify types for PROTO or ENUM fields within STRUCTs or MAPs, use a dot-separated
+ path from the top-level column name.
+
+ - For STRUCTs: ``struct_column_name.field_name``
+ - For MAPs: ``map_column_name.key`` or ``map_column_name.value`` to specify types
+ for the map keys or values, respectively.
+
+ Example::
+
+ import my_pb2
+
+ column_info = {
+ # Top-level column
+ "my_proto_column": my_pb2.MyMessage,
+ "my_enum_column": my_pb2.MyEnum,
+
+ # Nested field in a STRUCT column named 'my_struct'
+ "my_struct.nested_proto_field": my_pb2.OtherMessage,
+ "my_struct.nested_enum_field": my_pb2.AnotherEnum,
+
+ # Nested field in a MAP column named 'my_map'
+ "my_map.key": my_pb2.MapKeyEnum, # If map keys were enums
+ "my_map.value": my_pb2.MapValueMessage,
+
+ # PROTO field inside a STRUCT, where the STRUCT is the value in a MAP column
+ "struct_map.value.nested_proto_field": my_pb2.DeeplyNestedProto,
+ "struct_map.value.nested_enum_field": my_pb2.DeeplyNestedEnum
+ }
+
Returns:
ExecuteQueryIteratorAsync: an asynchronous iterator that yields rows returned by the query
Raises:
@@ -682,6 +829,7 @@ async def execute_query(
google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if
a parameter is passed without an explicit type, and the type cannot be infered
+ google.protobuf.message.DecodeError: raised if the deserialization of a PROTO/ENUM value fails.
"""
instance_name = self._gapic_client.instance_path(self.project, instance_id)
converted_param_types = _to_param_types(parameters, parameter_types)
@@ -739,6 +887,7 @@ async def execute_query(
attempt_timeout,
operation_timeout,
retryable_excs=retryable_excs,
+ column_info=column_info,
)
@CrossSync.convert(sync_name="__enter__")
@@ -790,6 +939,7 @@ def __init__(
DeadlineExceeded,
ServiceUnavailable,
Aborted,
+ Cancelled,
),
default_mutate_rows_retryable_errors: Sequence[type[Exception]] = (
DeadlineExceeded,
@@ -863,30 +1013,41 @@ def __init__(
self.table_name = self.client._gapic_client.table_path(
self.client.project, instance_id, table_id
)
- self.app_profile_id = app_profile_id
+ self.app_profile_id: str | None = app_profile_id
- self.default_operation_timeout = default_operation_timeout
- self.default_attempt_timeout = default_attempt_timeout
- self.default_read_rows_operation_timeout = default_read_rows_operation_timeout
- self.default_read_rows_attempt_timeout = default_read_rows_attempt_timeout
- self.default_mutate_rows_operation_timeout = (
+ self.default_operation_timeout: float = default_operation_timeout
+ self.default_attempt_timeout: float | None = default_attempt_timeout
+ self.default_read_rows_operation_timeout: float = (
+ default_read_rows_operation_timeout
+ )
+ self.default_read_rows_attempt_timeout: float | None = (
+ default_read_rows_attempt_timeout
+ )
+ self.default_mutate_rows_operation_timeout: float = (
default_mutate_rows_operation_timeout
)
- self.default_mutate_rows_attempt_timeout = default_mutate_rows_attempt_timeout
+ self.default_mutate_rows_attempt_timeout: float | None = (
+ default_mutate_rows_attempt_timeout
+ )
- self.default_read_rows_retryable_errors = (
+ self.default_read_rows_retryable_errors: Sequence[type[Exception]] = (
default_read_rows_retryable_errors or ()
)
- self.default_mutate_rows_retryable_errors = (
+ self.default_mutate_rows_retryable_errors: Sequence[type[Exception]] = (
default_mutate_rows_retryable_errors or ()
)
- self.default_retryable_errors = default_retryable_errors or ()
+ self.default_retryable_errors: Sequence[type[Exception]] = (
+ default_retryable_errors or ()
+ )
+
+ self._metrics = BigtableClientSideMetricsController()
try:
self._register_instance_future = CrossSync.create_task(
self.client._register_instance,
self.instance_id,
- self,
+ self.app_profile_id,
+ id(self),
sync_executor=self.client._executor,
)
except RuntimeError as e:
@@ -1595,9 +1756,12 @@ async def close(self):
"""
Called to close the Table instance and release any resources held by it.
"""
+ self._metrics.close()
if self._register_instance_future:
self._register_instance_future.cancel()
- await self.client._remove_instance_registration(self.instance_id, self)
+ self.client._remove_instance_registration(
+ self.instance_id, self.app_profile_id, id(self)
+ )
@CrossSync.convert(sync_name="__enter__")
async def __aenter__(self):
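The new `universe_domain` and `api_endpoint` properties simply surface the values resolved by the underlying gapic client. A hedged sketch, assuming the public sync client is exported as `BigtableDataClient` and that the emulator environment variable is `BIGTABLE_EMULATOR_HOST` (both names are assumptions not shown in this patch)::

    import os

    # Pointing at a (hypothetical) local emulator avoids needing real credentials.
    os.environ["BIGTABLE_EMULATOR_HOST"] = "localhost:8086"

    from google.cloud.bigtable.data import BigtableDataClient

    client = BigtableDataClient(project="my-project")
    print(client.universe_domain)  # "googleapis.com" unless overridden via client_options
    print(client.api_endpoint)
    client.close()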
diff --git a/google/cloud/bigtable/data/_async/metrics_interceptor.py b/google/cloud/bigtable/data/_async/metrics_interceptor.py
new file mode 100644
index 000000000..249dcdcc9
--- /dev/null
+++ b/google/cloud/bigtable/data/_async/metrics_interceptor.py
@@ -0,0 +1,172 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+from typing import Sequence
+
+import time
+from functools import wraps
+
+from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric
+from google.cloud.bigtable.data._metrics.data_model import OperationState
+from google.cloud.bigtable.data._metrics.data_model import OperationType
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if CrossSync.is_async:
+ from grpc.aio import UnaryUnaryClientInterceptor
+ from grpc.aio import UnaryStreamClientInterceptor
+ from grpc.aio import AioRpcError
+else:
+ from grpc import UnaryUnaryClientInterceptor
+ from grpc import UnaryStreamClientInterceptor
+
+
+__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.metrics_interceptor"
+
+
+def _with_active_operation(func):
+ """
+ Decorator for interceptor methods to extract the active operation associated with the
+ in-scope contextvars, and pass it to the decorated function.
+ """
+
+ @wraps(func)
+ def wrapper(self, continuation, client_call_details, request):
+ operation: ActiveOperationMetric | None = ActiveOperationMetric.from_context()
+
+ if operation:
+ # start a new attempt if not started
+ if (
+ operation.state == OperationState.CREATED
+ or operation.state == OperationState.BETWEEN_ATTEMPTS
+ ):
+ operation.start_attempt()
+ # wrap continuation in logic to process the operation
+ return func(self, operation, continuation, client_call_details, request)
+ else:
+ # if operation not found, return unwrapped continuation
+ return continuation(client_call_details, request)
+
+ return wrapper
+
+
+@CrossSync.convert
+async def _get_metadata(source) -> dict[str, str | bytes] | None:
+ """Helper to extract metadata from a call or RpcError"""
+ try:
+ metadata: Sequence[tuple[str, str | bytes]]
+ if CrossSync.is_async:
+ # grpc.aio returns metadata in Metadata objects
+ if isinstance(source, AioRpcError):
+ metadata = list(source.trailing_metadata()) + list(
+ source.initial_metadata()
+ )
+ else:
+ metadata = list(await source.trailing_metadata()) + list(
+ await source.initial_metadata()
+ )
+ else:
+ # sync grpc returns metadata as a sequence of tuples
+ metadata = source.trailing_metadata() + source.initial_metadata()
+ # convert metadata to dict format
+ return {k: v for (k, v) in metadata}
+ except Exception:
+ # ignore errors while fetching metadata
+ return None
+
+
+@CrossSync.convert_class(sync_name="BigtableMetricsInterceptor")
+class AsyncBigtableMetricsInterceptor(
+ UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor
+):
+ """
+    An async gRPC interceptor that captures response metadata from each RPC and records it on the active operation metric.
+ """
+
+ @CrossSync.convert
+ @_with_active_operation
+ async def intercept_unary_unary(
+ self, operation, continuation, client_call_details, request
+ ):
+ """
+ Interceptor for unary rpcs:
+ - MutateRow
+ - CheckAndMutateRow
+ - ReadModifyWriteRow
+ """
+ metadata = None
+ try:
+ call = await continuation(client_call_details, request)
+ metadata = await _get_metadata(call)
+ return call
+ except Exception as rpc_error:
+ metadata = await _get_metadata(rpc_error)
+ raise rpc_error
+ finally:
+ if metadata is not None:
+ operation.add_response_metadata(metadata)
+
+ @CrossSync.convert
+ @_with_active_operation
+ async def intercept_unary_stream(
+ self, operation, continuation, client_call_details, request
+ ):
+ """
+ Interceptor for streaming rpcs:
+ - ReadRows
+ - MutateRows
+ - SampleRowKeys
+ """
+ try:
+ return self._streaming_generator_wrapper(
+ operation, await continuation(client_call_details, request)
+ )
+ except Exception as rpc_error:
+            # handle errors while initializing stream
+ metadata = await _get_metadata(rpc_error)
+ if metadata is not None:
+ operation.add_response_metadata(metadata)
+ raise rpc_error
+
+ @staticmethod
+ @CrossSync.convert
+ async def _streaming_generator_wrapper(operation, call):
+ """
+ Wrapped generator to be returned by intercept_unary_stream.
+ """
+        # only track first-response latency for READ_ROWS
+ has_first_response = (
+ operation.first_response_latency_ns is not None
+ or operation.op_type != OperationType.READ_ROWS
+ )
+ encountered_exc = None
+ try:
+ async for response in call:
+                # record time to first response. Currently only used for READ_ROWS
+ if not has_first_response:
+ operation.first_response_latency_ns = (
+ time.monotonic_ns() - operation.start_time_ns
+ )
+ has_first_response = True
+ yield response
+ except Exception as e:
+ # handle errors while processing stream
+ encountered_exc = e
+ raise
+ finally:
+ if call is not None:
+ metadata = await _get_metadata(encountered_exc or call)
+ if metadata is not None:
+ operation.add_response_metadata(metadata)
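To make the decorator flow above concrete: constructing an `ActiveOperationMetric` stores it in a contextvar, and `_with_active_operation` recovers it inside the interceptor via `from_context()`. A minimal sketch, assuming the patched package is installed::

    from google.cloud.bigtable.data._metrics import ActiveOperationMetric, OperationType

    op = ActiveOperationMetric(OperationType.READ_ROWS)  # __post_init__ saves it to the contextvar
    # This is what the interceptor sees when the RPC is issued in the same context:
    assert ActiveOperationMetric.from_context() is op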
diff --git a/google/cloud/bigtable/data/_helpers.py b/google/cloud/bigtable/data/_helpers.py
index 424a34486..e848ebc6f 100644
--- a/google/cloud/bigtable/data/_helpers.py
+++ b/google/cloud/bigtable/data/_helpers.py
@@ -23,6 +23,7 @@
from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
from google.api_core import exceptions as core_exceptions
+from google.api_core.retry import exponential_sleep_generator
from google.api_core.retry import RetryFailureReason
from google.cloud.bigtable.data.exceptions import RetryExceptionGroup
@@ -248,3 +249,61 @@ def _get_retryable_errors(
call_codes = table.default_mutate_rows_retryable_errors
return [_get_error_type(e) for e in call_codes]
+
+
+class TrackedBackoffGenerator:
+ """
+ Generator class for exponential backoff sleep times.
+    This implementation builds on top of api_core.retry.exponential_sleep_generator,
+ adding the ability to retrieve previous values using get_attempt_backoff(idx).
+ This is used by the Metrics class to track the sleep times used for each attempt.
+ """
+
+ def __init__(self, initial=0.01, maximum=60, multiplier=2):
+ self.history = []
+ self.subgenerator = exponential_sleep_generator(
+ initial=initial, maximum=maximum, multiplier=multiplier
+ )
+ self._next_override: float | None = None
+
+ def __iter__(self):
+ return self
+
+ def set_next(self, next_value: float):
+ """
+ Set the next backoff value, instead of generating one from subgenerator.
+ After the value is yielded, it will go back to using self.subgenerator.
+
+        If set_next is called more than once before next() is called, only the
+        latest value will be used and the others discarded.
+
+ Args:
+            next_value: the upcoming value to yield when next() is called
+ Raises:
+ ValueError: if next_value is negative
+ """
+ if next_value < 0:
+ raise ValueError("backoff value cannot be less than 0")
+ self._next_override = next_value
+
+ def __next__(self) -> float:
+ if self._next_override is not None:
+ next_backoff = self._next_override
+ self._next_override = None
+ else:
+ next_backoff = next(self.subgenerator)
+ self.history.append(next_backoff)
+ return next_backoff
+
+ def get_attempt_backoff(self, attempt_idx) -> float:
+ """
+ returns the backoff time for a specific attempt index, starting at 0.
+
+ Args:
+ attempt_idx: the index of the attempt to return backoff for
+ Raises:
+ IndexError: if attempt_idx is negative, or not in history
+ """
+ if attempt_idx < 0:
+ raise IndexError("received negative attempt number")
+ return self.history[attempt_idx]
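A short usage sketch for `TrackedBackoffGenerator`; values are illustrative, and the first value comes from the wrapped `exponential_sleep_generator` (which applies jitter), so it is not exactly `initial`::

    from google.cloud.bigtable.data._helpers import TrackedBackoffGenerator

    gen = TrackedBackoffGenerator(initial=0.01, maximum=60, multiplier=2)

    first = next(gen)    # drawn from the underlying exponential generator
    gen.set_next(5.0)    # force the next backoff, e.g. from a server retry hint
    second = next(gen)   # 5.0; later calls fall back to the subgenerator

    assert gen.get_attempt_backoff(0) == first
    assert gen.get_attempt_backoff(1) == second == 5.0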
diff --git a/google/cloud/bigtable/data/_metrics/__init__.py b/google/cloud/bigtable/data/_metrics/__init__.py
new file mode 100644
index 000000000..26cfc1326
--- /dev/null
+++ b/google/cloud/bigtable/data/_metrics/__init__.py
@@ -0,0 +1,35 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from google.cloud.bigtable.data._metrics.metrics_controller import (
+ BigtableClientSideMetricsController,
+)
+
+from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric
+from google.cloud.bigtable.data._metrics.data_model import ActiveAttemptMetric
+from google.cloud.bigtable.data._metrics.data_model import CompletedOperationMetric
+from google.cloud.bigtable.data._metrics.data_model import CompletedAttemptMetric
+from google.cloud.bigtable.data._metrics.data_model import OperationState
+from google.cloud.bigtable.data._metrics.data_model import OperationType
+from google.cloud.bigtable.data._metrics.tracked_retry import tracked_retry
+
+__all__ = (
+ "BigtableClientSideMetricsController",
+ "OperationType",
+ "OperationState",
+ "ActiveOperationMetric",
+ "ActiveAttemptMetric",
+ "CompletedOperationMetric",
+ "CompletedAttemptMetric",
+ "tracked_retry",
+)
diff --git a/google/cloud/bigtable/data/_metrics/data_model.py b/google/cloud/bigtable/data/_metrics/data_model.py
new file mode 100644
index 000000000..64dd63bfa
--- /dev/null
+++ b/google/cloud/bigtable/data/_metrics/data_model.py
@@ -0,0 +1,469 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+from typing import ClassVar, Tuple, cast, TYPE_CHECKING
+
+import time
+import re
+import logging
+import contextvars
+
+from enum import Enum
+from functools import lru_cache
+from dataclasses import dataclass
+from dataclasses import field
+from grpc import StatusCode
+from grpc import RpcError
+from grpc.aio import AioRpcError
+
+import google.cloud.bigtable.data.exceptions as bt_exceptions
+from google.cloud.bigtable_v2.types.response_params import ResponseParams
+from google.cloud.bigtable.data._helpers import TrackedBackoffGenerator
+from google.protobuf.message import DecodeError
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data._metrics.handlers._base import MetricsHandler
+
+
+LOGGER = logging.getLogger(__name__)
+
+# default values for zone and cluster data, if not captured
+DEFAULT_ZONE = "global"
+DEFAULT_CLUSTER_ID = ""
+
+# keys for parsing metadata blobs
+BIGTABLE_LOCATION_METADATA_KEY = "x-goog-ext-425905942-bin"
+SERVER_TIMING_METADATA_KEY = "server-timing"
+SERVER_TIMING_REGEX = re.compile(r".*gfet4t7;\s*dur=(\d+\.?\d*).*")
+
+INVALID_STATE_ERROR = "Invalid state for {}: {}"
+
+
+class OperationType(Enum):
+ """Enum for the type of operation being performed."""
+
+ READ_ROWS = "ReadRows"
+ SAMPLE_ROW_KEYS = "SampleRowKeys"
+ BULK_MUTATE_ROWS = "MutateRows"
+ MUTATE_ROW = "MutateRow"
+ CHECK_AND_MUTATE = "CheckAndMutateRow"
+ READ_MODIFY_WRITE = "ReadModifyWriteRow"
+
+
+class OperationState(Enum):
+ """Enum for the state of the active operation.
+
+ ┌───────────┐
+ │ CREATED │────────┐
+ └─────┬─────┘ │
+ │ │
+ ▼ │
+ ┌▶ ACTIVE_ATTEMPT ───┐│
+ │ │ ││
+ │ ▼ ││
+ └─ BETWEEN_ATTEMPTS ││
+ │ ││
+ ▼ ││
+ ┌───────────┐ ││
+ │ COMPLETED │ ◀─────┘│
+ └───────────┘ ◀──────┘
+ """
+
+ CREATED = 0
+ ACTIVE_ATTEMPT = 1
+ BETWEEN_ATTEMPTS = 2
+ COMPLETED = 3
+
+
+@dataclass(frozen=True)
+class CompletedAttemptMetric:
+ """
+ An immutable dataclass representing the data associated with a
+ completed rpc attempt.
+
+ Operation-level fields (eg. type, cluster, zone) are stored on the
+ corresponding CompletedOperationMetric or ActiveOperationMetric object.
+ """
+
+ duration_ns: int
+ end_status: StatusCode
+ gfe_latency_ns: int | None = None
+ application_blocking_time_ns: int = 0
+ backoff_before_attempt_ns: int = 0
+
+
+@dataclass(frozen=True)
+class CompletedOperationMetric:
+ """
+ An immutable dataclass representing the data associated with a
+ completed rpc operation.
+
+ Attempt-level fields (eg. duration, latencies, etc) are stored on the
+ corresponding CompletedAttemptMetric object.
+ """
+
+ op_type: OperationType
+ duration_ns: int
+ completed_attempts: list[CompletedAttemptMetric]
+ final_status: StatusCode
+ cluster_id: str
+ zone: str
+ is_streaming: bool
+ first_response_latency_ns: int | None = None
+ flow_throttling_time_ns: int = 0
+
+
+@dataclass
+class ActiveAttemptMetric:
+ """
+ A dataclass representing the data associated with an rpc attempt that is
+ currently in progress. Fields are mutable and may be optional.
+ """
+
+ # keep monotonic timestamps for active attempts
+ start_time_ns: int = field(default_factory=lambda: time.monotonic_ns())
+ # the time taken by the backend, in nanoseconds. Taken from response header
+ gfe_latency_ns: int | None = None
+ # time waiting on user to process the response, in nanoseconds
+ # currently only relevant for ReadRows
+ application_blocking_time_ns: int = 0
+ # backoff time is added to application_blocking_time_ns
+ backoff_before_attempt_ns: int = 0
+
+
+@dataclass
+class ActiveOperationMetric:
+ """
+ A dataclass representing the data associated with an rpc operation that is
+ currently in progress. Fields are mutable and may be optional.
+ """
+
+ op_type: OperationType
+ state: OperationState = OperationState.CREATED
+ # create a default backoff generator, initialized with standard default backoff values
+ backoff_generator: TrackedBackoffGenerator = field(
+ default_factory=lambda: TrackedBackoffGenerator(
+ initial=0.01, maximum=60, multiplier=2
+ )
+ )
+ # keep monotonic timestamps for active operations
+ start_time_ns: int = field(default_factory=lambda: time.monotonic_ns())
+ active_attempt: ActiveAttemptMetric | None = None
+ cluster_id: str | None = None
+ zone: str | None = None
+ completed_attempts: list[CompletedAttemptMetric] = field(default_factory=list)
+ is_streaming: bool = False # only True for read_rows operations
+ handlers: list[MetricsHandler] = field(default_factory=list)
+    # the time it takes to receive the first response from the server, in nanoseconds
+ # attached by interceptor
+ # currently only tracked for ReadRows
+ first_response_latency_ns: int | None = None
+ # time waiting on flow control, in nanoseconds
+ flow_throttling_time_ns: int = 0
+
+ _active_operation_context: ClassVar[
+ contextvars.ContextVar[ActiveOperationMetric]
+ ] = contextvars.ContextVar("active_operation_context")
+
+ @classmethod
+ def from_context(cls) -> ActiveOperationMetric | None:
+ """Retrieves the active operation from the current execution context.
+
+ Because execution within a context is sequential, this guarantees
+ retrieval of the single, unique operation, isolated from other
+ concurrent RPCs.
+
+ Note:
+ This is intended to be called by gRPC interceptors at the start
+ of an RPC.
+
+ Returns:
+ ActiveOperationMetric: The current active operation.
+ None: If no operation is set, or if the current operation is
+ already in the `COMPLETED` state.
+ """
+ op = cls._active_operation_context.get(None)
+ if op and op.state == OperationState.COMPLETED:
+ return None
+ return op
+
+ def __post_init__(self):
+ """
+ Save new instances to contextvars on init
+ """
+ self._active_operation_context.set(self)
+
+ def start(self) -> None:
+ """
+ Optionally called to mark the start of the operation. If not called,
+ the operation will be started at initialization.
+
+ StartState: CREATED
+ EndState: CREATED
+ """
+ if self.state != OperationState.CREATED:
+ return self._handle_error(INVALID_STATE_ERROR.format("start", self.state))
+ self.start_time_ns = time.monotonic_ns()
+ # set as active operation in contextvars
+ self._active_operation_context.set(self)
+
+ def start_attempt(self) -> ActiveAttemptMetric | None:
+ """
+ Called to initiate a new attempt for the operation.
+
+ StartState: CREATED | BETWEEN_ATTEMPTS
+ EndState: ACTIVE_ATTEMPT
+ """
+ if (
+ self.state != OperationState.BETWEEN_ATTEMPTS
+ and self.state != OperationState.CREATED
+ ):
+ return self._handle_error(
+ INVALID_STATE_ERROR.format("start_attempt", self.state)
+ )
+ # set as active operation in contextvars
+ self._active_operation_context.set(self)
+
+ try:
+ # find backoff value before this attempt
+ prev_attempt_idx = len(self.completed_attempts) - 1
+ backoff = self.backoff_generator.get_attempt_backoff(prev_attempt_idx)
+ # generator will return the backoff time in seconds, so convert to nanoseconds
+ backoff_ns = int(backoff * 1e9)
+ except IndexError:
+ # backoff value not found
+ backoff_ns = 0
+
+ self.active_attempt = ActiveAttemptMetric(backoff_before_attempt_ns=backoff_ns)
+ self.state = OperationState.ACTIVE_ATTEMPT
+ return self.active_attempt
+
+ def add_response_metadata(self, metadata: dict[str, bytes | str]) -> None:
+ """
+ Attach trailing metadata to the active attempt.
+
+ If not called, default values for the metadata will be used.
+
+ StartState: ACTIVE_ATTEMPT
+ EndState: ACTIVE_ATTEMPT
+
+ Args:
+ - metadata: the metadata as extracted from the grpc call
+ """
+ if self.state != OperationState.ACTIVE_ATTEMPT:
+ return self._handle_error(
+ INVALID_STATE_ERROR.format("add_response_metadata", self.state)
+ )
+ if self.cluster_id is None or self.zone is None:
+ # BIGTABLE_LOCATION_METADATA_KEY should give a binary-encoded ResponseParams proto
+ blob = cast(bytes, metadata.get(BIGTABLE_LOCATION_METADATA_KEY))
+ if blob:
+ parse_result = self._parse_response_metadata_blob(blob)
+ if parse_result is not None:
+ cluster, zone = parse_result
+ if cluster:
+ self.cluster_id = cluster
+ if zone:
+ self.zone = zone
+ else:
+ self._handle_error(
+ f"Failed to decode {BIGTABLE_LOCATION_METADATA_KEY} metadata: {blob!r}"
+ )
+ # SERVER_TIMING_METADATA_KEY should give a string with the server-latency headers
+ timing_header = cast(str, metadata.get(SERVER_TIMING_METADATA_KEY))
+ if timing_header:
+ timing_data = SERVER_TIMING_REGEX.match(timing_header)
+ if timing_data and self.active_attempt:
+ gfe_latency_ms = float(timing_data.group(1))
+ self.active_attempt.gfe_latency_ns = int(gfe_latency_ms * 1e6)
+
+ @staticmethod
+ @lru_cache(maxsize=32)
+ def _parse_response_metadata_blob(blob: bytes) -> Tuple[str, str] | None:
+ """
+ Parse the response metadata blob and return a tuple of cluster and zone.
+
+ Function is cached to avoid parsing the same blob multiple times.
+
+ Args:
+ - blob: the metadata blob as extracted from the grpc call
+ Returns:
+ - a tuple of cluster_id and zone, or None if parsing failed
+ """
+ try:
+ proto = ResponseParams.pb().FromString(blob)
+ return proto.cluster_id, proto.zone_id
+ except (DecodeError, TypeError):
+ # failed to parse metadata
+ return None
+
+ def end_attempt_with_status(self, status: StatusCode | BaseException) -> None:
+ """
+ Called to mark the end of an attempt for the operation.
+
+ Typically, this is used to mark a retryable error. If a retry will not
+ be attempted, `end_with_status` or `end_with_success` should be used
+ to finalize the operation along with the attempt.
+
+ StartState: ACTIVE_ATTEMPT
+ EndState: BETWEEN_ATTEMPTS
+
+ Args:
+ - status: The status of the attempt.
+ """
+ if self.state != OperationState.ACTIVE_ATTEMPT or self.active_attempt is None:
+ return self._handle_error(
+ INVALID_STATE_ERROR.format("end_attempt_with_status", self.state)
+ )
+ if isinstance(status, BaseException):
+ status = self._exc_to_status(status)
+ duration_ns = self._ensure_positive(
+ time.monotonic_ns() - self.active_attempt.start_time_ns, "duration"
+ )
+ complete_attempt = CompletedAttemptMetric(
+ duration_ns=duration_ns,
+ end_status=status,
+ gfe_latency_ns=self.active_attempt.gfe_latency_ns,
+ application_blocking_time_ns=self.active_attempt.application_blocking_time_ns,
+ backoff_before_attempt_ns=self.active_attempt.backoff_before_attempt_ns,
+ )
+ self.completed_attempts.append(complete_attempt)
+ self.active_attempt = None
+ self.state = OperationState.BETWEEN_ATTEMPTS
+ for handler in self.handlers:
+ handler.on_attempt_complete(complete_attempt, self)
+
+ def end_with_status(self, status: StatusCode | BaseException) -> None:
+ """
+ Called to mark the end of the operation. If there is an active attempt,
+ end_attempt_with_status will be called with the same status.
+
+ StartState: CREATED | ACTIVE_ATTEMPT | BETWEEN_ATTEMPTS
+ EndState: COMPLETED
+
+ Causes on_operation_completed to be called for each registered handler.
+
+ Args:
+ - status: The status of the operation.
+ """
+ if self.state == OperationState.COMPLETED:
+ return self._handle_error(
+ INVALID_STATE_ERROR.format("end_with_status", self.state)
+ )
+ final_status = (
+ self._exc_to_status(status) if isinstance(status, BaseException) else status
+ )
+ if self.state == OperationState.ACTIVE_ATTEMPT:
+ self.end_attempt_with_status(final_status)
+ duration_ns = self._ensure_positive(
+ time.monotonic_ns() - self.start_time_ns, "duration"
+ )
+ finalized = CompletedOperationMetric(
+ op_type=self.op_type,
+ completed_attempts=self.completed_attempts,
+ duration_ns=duration_ns,
+ final_status=final_status,
+ cluster_id=self.cluster_id or DEFAULT_CLUSTER_ID,
+ zone=self.zone or DEFAULT_ZONE,
+ is_streaming=self.is_streaming,
+ first_response_latency_ns=self.first_response_latency_ns,
+ flow_throttling_time_ns=self.flow_throttling_time_ns,
+ )
+ self.state = OperationState.COMPLETED
+ for handler in self.handlers:
+ handler.on_operation_complete(finalized)
+
+ def end_with_success(self):
+ """
+ Called to mark the end of the operation with a successful status.
+
+ StartState: CREATED | ACTIVE_ATTEMPT | BETWEEN_ATTEMPTS
+ EndState: COMPLETED
+
+ Causes on_operation_completed to be called for each registered handler.
+ """
+ return self.end_with_status(StatusCode.OK)
+
+ @staticmethod
+ def _exc_to_status(exc: BaseException) -> StatusCode:
+ """
+ Extracts the grpc status code from an exception.
+
+ Exception groups and wrappers will be parsed to find the underlying
+ grpc Exception.
+
+ If the exception is not a grpc exception, will return StatusCode.UNKNOWN.
+
+ Args:
+ - exc: The exception to extract the status code from.
+ """
+ if isinstance(exc, bt_exceptions._BigtableExceptionGroup):
+ exc = exc.exceptions[-1]
+ if hasattr(exc, "grpc_status_code") and exc.grpc_status_code is not None:
+ return exc.grpc_status_code
+ if (
+ exc.__cause__
+ and hasattr(exc.__cause__, "grpc_status_code")
+ and exc.__cause__.grpc_status_code is not None
+ ):
+ return exc.__cause__.grpc_status_code
+ if isinstance(exc, AioRpcError) or isinstance(exc, RpcError):
+ return exc.code()
+ return StatusCode.UNKNOWN
+
+ @staticmethod
+ def _handle_error(message: str) -> None:
+ """
+        Log error messages from the metrics system
+
+ Args:
+ - message: The message to include in the exception or warning.
+ """
+ full_message = f"Error in Bigtable Metrics: {message}"
+ LOGGER.warning(full_message)
+
+ def _ensure_positive(self, value: int, field_name: str) -> int:
+ """
+ Helper to replace negative value with 0, and record an error
+ """
+ if value < 0:
+ self._handle_error(f"received negative value for {field_name}: {value}")
+ return 0
+ return value
+
+ def __enter__(self):
+ """
+        Implements the context manager protocol
+
+        Using the operation's context manager provides assurances that the operation
+        is always closed when complete, with the proper status code automatically
+        detected when an exception is raised.
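+
+        A minimal usage sketch::
+
+            operation = ActiveOperationMetric(OperationType.READ_ROWS)
+            with operation:
+                operation.start_attempt()
+                ...  # make the rpc
+            # on exit, the operation is finalized with the detected status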
+ """
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """
+ Implements the context manager protocol
+
+ The operation is automatically ended on exit, with the status determined
+ by the exception type and value.
+
+ If operation was already ended manually, do nothing.
+ """
+ if not self.state == OperationState.COMPLETED:
+ if exc_val is None:
+ self.end_with_success()
+ else:
+ self.end_with_status(exc_val)
diff --git a/google/cloud/bigtable/data/_metrics/handlers/_base.py b/google/cloud/bigtable/data/_metrics/handlers/_base.py
new file mode 100644
index 000000000..884091fdd
--- /dev/null
+++ b/google/cloud/bigtable/data/_metrics/handlers/_base.py
@@ -0,0 +1,38 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric
+from google.cloud.bigtable.data._metrics.data_model import CompletedAttemptMetric
+from google.cloud.bigtable.data._metrics.data_model import CompletedOperationMetric
+
+
+class MetricsHandler:
+ """
+ Base class for all metrics handlers. Metrics handlers will receive callbacks
+ when operations and attempts are completed, and can use this information to
+ update some external metrics system.
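+
+    A minimal handler sketch (the class name and print format are illustrative)::
+
+        class PrintingHandler(MetricsHandler):
+            def on_operation_complete(self, op):
+                print(f"{op.op_type}: {op.final_status} took {op.duration_ns}ns")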
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ def on_operation_complete(self, op: CompletedOperationMetric) -> None:
+ pass
+
+ def on_attempt_complete(
+ self, attempt: CompletedAttemptMetric, op: ActiveOperationMetric
+ ) -> None:
+ pass
+
+ def close(self):
+ pass
diff --git a/google/cloud/bigtable/data/_metrics/metrics_controller.py b/google/cloud/bigtable/data/_metrics/metrics_controller.py
new file mode 100644
index 000000000..e9815f201
--- /dev/null
+++ b/google/cloud/bigtable/data/_metrics/metrics_controller.py
@@ -0,0 +1,63 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric
+from google.cloud.bigtable.data._metrics.handlers._base import MetricsHandler
+from google.cloud.bigtable.data._metrics.data_model import OperationType
+
+
+class BigtableClientSideMetricsController:
+ """
+ BigtableClientSideMetricsController is responsible for managing the
+ lifecycle of the metrics system. The Bigtable client library will
+ use this class to create new operations. Each operation will be
+ registered with the handlers associated with this controller.
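+
+    A minimal usage sketch (the handler instance is illustrative)::
+
+        controller = BigtableClientSideMetricsController(handlers=[MyHandler()])
+        operation = controller.create_operation(OperationType.READ_ROWS)
+        with operation:
+            ...  # run the rpc; handlers are notified when the operation completes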
+ """
+
+ def __init__(
+ self,
+ handlers: list[MetricsHandler] | None = None,
+ ):
+ """
+ Initializes the metrics controller.
+
+ Args:
+ - handlers: A list of MetricsHandler objects to subscribe to metrics events.
+ """
+ self.handlers: list[MetricsHandler] = handlers or []
+
+ def add_handler(self, handler: MetricsHandler) -> None:
+ """
+ Add a new handler to the list of handlers.
+
+ Args:
+ - handler: A MetricsHandler object to add to the list of subscribed handlers.
+ """
+ self.handlers.append(handler)
+
+ def create_operation(
+ self, op_type: OperationType, **kwargs
+ ) -> ActiveOperationMetric:
+ """
+ Creates a new operation and registers it with the subscribed handlers.
+ """
+ return ActiveOperationMetric(op_type, **kwargs, handlers=self.handlers)
+
+ def close(self):
+ """
+ Close all handlers.
+ """
+ for handler in self.handlers:
+ handler.close()
diff --git a/google/cloud/bigtable/data/_metrics/tracked_retry.py b/google/cloud/bigtable/data/_metrics/tracked_retry.py
new file mode 100644
index 000000000..94d2e5dcb
--- /dev/null
+++ b/google/cloud/bigtable/data/_metrics/tracked_retry.py
@@ -0,0 +1,133 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Methods for instrumenting a google.api_core.retry.retry_target or
+google.api_core.retry.retry_target_stream call
+
+`tracked_retry` will intercept the `on_error` and `exception_factory`
+callbacks to update the associated ActiveOperationMetric when exceptions
+are encountered during the retryable rpc.
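+
+Example usage (a sketch; `fn`, `predicate`, and the timeout are illustrative)::
+
+    from google.api_core.retry import retry_target
+
+    result = tracked_retry(
+        retry_fn=retry_target,
+        operation=operation,
+        target=fn,
+        predicate=predicate,
+        timeout=60.0,
+    )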
+"""
+from __future__ import annotations
+
+from typing import Callable, List, Optional, Tuple, TypeVar
+
+from grpc import StatusCode
+from google.api_core.exceptions import GoogleAPICallError
+from google.api_core.retry import RetryFailureReason
+from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete
+from google.cloud.bigtable.data._helpers import _retry_exception_factory
+from google.cloud.bigtable.data._metrics import ActiveOperationMetric
+from google.cloud.bigtable.data._metrics import OperationState
+
+
+T = TypeVar("T")
+
+
+ExceptionFactoryType = Callable[
+ [List[Exception], RetryFailureReason, Optional[float]],
+ Tuple[Exception, Optional[Exception]],
+]
+
+
+def _track_retryable_error(
+ operation: ActiveOperationMetric,
+) -> Callable[[Exception], None]:
+ """
+ Used as input to api_core.Retry classes, to track when retryable errors are encountered
+
+ Should be passed as on_error callback
+ """
+
+ def wrapper(exc: Exception) -> None:
+ try:
+ # record metadata from failed rpc
+ if isinstance(exc, GoogleAPICallError) and exc.errors:
+ rpc_error = exc.errors[-1]
+ metadata = list(rpc_error.trailing_metadata()) + list(
+ rpc_error.initial_metadata()
+ )
+ operation.add_response_metadata({k: v for k, v in metadata})
+ except Exception:
+ # ignore errors in metadata collection
+ pass
+ if isinstance(exc, _MutateRowsIncomplete):
+ # _MutateRowsIncomplete represents a successful rpc with some failed mutations
+ # mark the attempt as successful
+ operation.end_attempt_with_status(StatusCode.OK)
+ else:
+ operation.end_attempt_with_status(exc)
+
+ return wrapper
+
+
+def _track_terminal_error(
+ operation: ActiveOperationMetric, exception_factory: ExceptionFactoryType
+) -> ExceptionFactoryType:
+ """
+ Used as input to api_core.Retry classes, to track when terminal errors are encountered
+
+ Should be used as a wrapper over an exception_factory callback
+ """
+
+ def wrapper(
+ exc_list: List[Exception],
+ reason: RetryFailureReason,
+ timeout_val: float | None,
+ ) -> tuple[Exception, Exception | None]:
+ source_exc, cause_exc = exception_factory(exc_list, reason, timeout_val)
+ try:
+ # record metadata from failed rpc
+ if isinstance(source_exc, GoogleAPICallError) and source_exc.errors:
+ rpc_error = source_exc.errors[-1]
+ metadata = list(rpc_error.trailing_metadata()) + list(
+ rpc_error.initial_metadata()
+ )
+ operation.add_response_metadata({k: v for k, v in metadata})
+ except Exception:
+ # ignore errors in metadata collection
+ pass
+ if (
+ reason == RetryFailureReason.TIMEOUT
+ and operation.state == OperationState.ACTIVE_ATTEMPT
+ and exc_list
+ ):
+ # record ending attempt for timeout failures
+ attempt_exc = exc_list[-1]
+ _track_retryable_error(operation)(attempt_exc)
+ operation.end_with_status(source_exc)
+ return source_exc, cause_exc
+
+ return wrapper
+
+
+def tracked_retry(
+ *,
+ retry_fn: Callable[..., T],
+ operation: ActiveOperationMetric,
+ **kwargs,
+) -> T:
+ """
+    Wrapper for retry_target or retry_target_stream, which injects methods to
+ track the lifecycle of the retry using the provided ActiveOperationMetric
+ """
+ in_exception_factory = kwargs.pop("exception_factory", _retry_exception_factory)
+ kwargs.pop("on_error", None)
+ kwargs.pop("sleep_generator", None)
+ return retry_fn(
+ sleep_generator=operation.backoff_generator,
+ on_error=_track_retryable_error(operation),
+ exception_factory=_track_terminal_error(operation, in_exception_factory),
+ **kwargs,
+ )
diff --git a/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py b/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py
new file mode 100644
index 000000000..78ba129d9
--- /dev/null
+++ b/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py
@@ -0,0 +1,96 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from __future__ import annotations
+from typing import Callable
+from grpc import ChannelConnectivity
+from grpc import Channel
+
+
+class _WrappedChannel(Channel):
+ """
+ A wrapper around a gRPC channel. All methods are passed
+ through to the underlying channel.
+ """
+
+ def __init__(self, channel: Channel):
+ self._channel = channel
+
+ def unary_unary(self, *args, **kwargs):
+ return self._channel.unary_unary(*args, **kwargs)
+
+ def unary_stream(self, *args, **kwargs):
+ return self._channel.unary_stream(*args, **kwargs)
+
+ def stream_unary(self, *args, **kwargs):
+ return self._channel.stream_unary(*args, **kwargs)
+
+ def stream_stream(self, *args, **kwargs):
+ return self._channel.stream_stream(*args, **kwargs)
+
+ def channel_ready(self):
+ return self._channel.channel_ready()
+
+ def __enter__(self):
+ self._channel.__enter__()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ return self._channel.__exit__(exc_type, exc_val, exc_tb)
+
+ def get_state(self, try_to_connect: bool = False) -> ChannelConnectivity:
+ return self._channel.get_state(try_to_connect=try_to_connect)
+
+ def wait_for_state_change(self, last_observed_state):
+ return self._channel.wait_for_state_change(last_observed_state)
+
+ def __getattr__(self, name):
+ return getattr(self._channel, name)
+
+ def close(self, grace=None):
+ return self._channel.close()
+
+ def subscribe(self, callback, try_to_connect=False):
+ return self._channel.subscribe(callback, try_to_connect)
+
+ def unsubscribe(self, callback):
+ return self._channel.unsubscribe(callback)
+
+
+class SwappableChannel(_WrappedChannel):
+ """
+    Provides a grpc channel wrapper that allows the internal channel to be swapped out
+
+ Args:
+ - channel_fn: a nullary function that returns a new channel instance.
+ It should be a partial with all channel configuration arguments built-in
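+
+    A minimal refresh sketch (the target address is illustrative)::
+
+        from functools import partial
+        import grpc
+
+        channel = SwappableChannel(partial(grpc.insecure_channel, "localhost:8086"))
+        fresh = channel.create_channel()
+        old = channel.swap_channel(fresh)
+        old.close()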
+ """
+
+ def __init__(self, channel_fn: Callable[[], Channel]):
+ self._channel_fn = channel_fn
+ self._channel = channel_fn()
+
+ def create_channel(self) -> Channel:
+ """Create a fresh channel using the stored `channel_fn` partial"""
+ new_channel = self._channel_fn()
+ return new_channel
+
+ def swap_channel(self, new_channel: Channel) -> Channel:
+ """Replace the wrapped channel with a new instance. Typically created using `create_channel`"""
+ old_channel = self._channel
+ self._channel = new_channel
+ return old_channel
diff --git a/google/cloud/bigtable/data/_sync_autogen/client.py b/google/cloud/bigtable/data/_sync_autogen/client.py
index b36bf359a..622002763 100644
--- a/google/cloud/bigtable/data/_sync_autogen/client.py
+++ b/google/cloud/bigtable/data/_sync_autogen/client.py
@@ -17,7 +17,7 @@
# This file is automatically generated by CrossSync. Do not edit manually.
from __future__ import annotations
-from typing import cast, Any, Optional, Set, Sequence, TYPE_CHECKING
+from typing import cast, Any, Callable, Optional, Set, Sequence, TYPE_CHECKING
import abc
import time
import warnings
@@ -49,6 +49,9 @@
from google.api_core.exceptions import DeadlineExceeded
from google.api_core.exceptions import ServiceUnavailable
from google.api_core.exceptions import Aborted
+from google.api_core.exceptions import Cancelled
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
import google.auth.credentials
import google.auth._default
from google.api_core import client_options as client_options_lib
@@ -72,6 +75,7 @@
from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter
from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter
from google.cloud.bigtable.data.row_filters import RowFilterChain
+from google.cloud.bigtable.data._metrics import BigtableClientSideMetricsController
from google.cloud.bigtable.data._cross_sync import CrossSync
from typing import Iterable
from grpc import insecure_channel
@@ -79,7 +83,14 @@
from google.cloud.bigtable_v2.services.bigtable.transports import (
BigtableGrpcTransport as TransportType,
)
+from google.cloud.bigtable_v2.services.bigtable import BigtableClient as GapicClient
from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE
+from google.cloud.bigtable.data._sync_autogen._swappable_channel import (
+ SwappableChannel as SwappableChannelType,
+)
+from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import (
+ BigtableMetricsInterceptor as MetricsInterceptorType,
+)
if TYPE_CHECKING:
from google.cloud.bigtable.data._helpers import RowKeySamples
@@ -131,7 +142,6 @@ def __init__(
client_options = cast(
Optional[client_options_lib.ClientOptions], client_options
)
- custom_channel = None
self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
if self._emulator_host is not None:
warnings.warn(
@@ -139,32 +149,40 @@ def __init__(
RuntimeWarning,
stacklevel=2,
)
- custom_channel = insecure_channel(self._emulator_host)
if credentials is None:
credentials = google.auth.credentials.AnonymousCredentials()
if project is None:
project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+ self._metrics_interceptor = MetricsInterceptorType()
ClientWithProject.__init__(
self,
credentials=credentials,
project=project,
client_options=client_options,
)
- self._gapic_client = CrossSync._Sync_Impl.GapicClient(
+ self._gapic_client = GapicClient(
credentials=credentials,
client_options=client_options,
client_info=self.client_info,
transport=lambda *args, **kwargs: TransportType(
- *args, **kwargs, channel=custom_channel
+ *args, **kwargs, channel=self._build_grpc_channel
),
)
+ if (
+ credentials
+ and credentials.universe_domain != self.universe_domain
+ and (self._emulator_host is None)
+ ):
+ raise ValueError(
+ f"The configured universe domain ({self.universe_domain}) does not match the universe domain found in the credentials ({self._credentials.universe_domain}). If you haven't configured the universe domain explicitly, `googleapis.com` is the default."
+ )
self._is_closed = CrossSync._Sync_Impl.Event()
self.transport = cast(TransportType, self._gapic_client.transport)
self._active_instances: Set[_WarmedInstanceKey] = set()
self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {}
self._channel_init_time = time.monotonic()
self._channel_refresh_task: CrossSync._Sync_Impl.Task[None] | None = None
- self._executor = (
+ self._executor: concurrent.futures.ThreadPoolExecutor | None = (
concurrent.futures.ThreadPoolExecutor()
if not CrossSync._Sync_Impl.is_async
else None
@@ -179,6 +197,50 @@ def __init__(
stacklevel=2,
)
+ def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannelType:
+ """This method is called by the gapic transport to create a grpc channel.
+
+        The init arguments passed down are captured in a partial used by SwappableChannel
+        to create new channel instances in the future, as part of the channel refresh logic.
+
+        Emulators always use an insecure channel.
+
+ Args:
+ - *args: positional arguments passed by the gapic layer to create a new channel with
+ - **kwargs: keyword arguments passed by the gapic layer to create a new channel with
+ Returns:
+ a custom wrapped swappable channel"""
+ create_channel_fn: Callable[[], Channel]
+ if self._emulator_host is not None:
+ create_channel_fn = partial(insecure_channel, self._emulator_host)
+ else:
+
+ def sync_create_channel_fn():
+ return intercept_channel(
+ TransportType.create_channel(*args, **kwargs),
+ self._metrics_interceptor,
+ )
+
+ create_channel_fn = sync_create_channel_fn
+ new_channel = SwappableChannelType(create_channel_fn)
+ return new_channel
+
+ @property
+ def universe_domain(self) -> str:
+ """Return the universe domain used by the client instance.
+
+ Returns:
+ str: The universe domain used by the client instance."""
+ return self._gapic_client.universe_domain
+
+ @property
+ def api_endpoint(self) -> str:
+ """Return the API endpoint used by the client instance.
+
+ Returns:
+ str: The API endpoint used by the client instance."""
+ return self._gapic_client.api_endpoint
+
@staticmethod
def _client_version() -> str:
"""Helper function to return the client version string for this client"""
@@ -255,6 +317,11 @@ def _ping_and_warm_instances(
)
return [r or None for r in result_list]
+ def _invalidate_channel_stubs(self):
+ """Helper to reset the cached stubs. Needed when changing out the grpc channel"""
+ self.transport._stubs = {}
+ self.transport._prep_wrapped_messages(self.client_info)
+
def _manage_channel(
self,
refresh_interval_min: float = 60 * 35,
@@ -277,12 +344,16 @@ def _manage_channel(
between `refresh_interval_min` and `refresh_interval_max`
grace_period: time to allow previous channel to serve existing
requests before closing, in seconds"""
+ if not isinstance(self.transport.grpc_channel, SwappableChannelType):
+ warnings.warn("Channel does not support auto-refresh.")
+ return
+ super_channel: SwappableChannelType = self.transport.grpc_channel
first_refresh = self._channel_init_time + random.uniform(
refresh_interval_min, refresh_interval_max
)
next_sleep = max(first_refresh - time.monotonic(), 0)
if next_sleep > 0:
- self._ping_and_warm_instances(channel=self.transport.grpc_channel)
+ self._ping_and_warm_instances(channel=super_channel)
while not self._is_closed.is_set():
CrossSync._Sync_Impl.event_wait(
self._is_closed, next_sleep, async_break_early=False
@@ -290,22 +361,20 @@ def _manage_channel(
if self._is_closed.is_set():
break
start_timestamp = time.monotonic()
- old_channel = self.transport.grpc_channel
- new_channel = self.transport.create_channel()
- new_channel = intercept_channel(new_channel, self.transport._interceptor)
+ new_channel = super_channel.create_channel()
self._ping_and_warm_instances(channel=new_channel)
- self.transport._grpc_channel = new_channel
- self.transport._logged_channel = new_channel
- self.transport._stubs = {}
- self.transport._prep_wrapped_messages(self.client_info)
+ old_channel = super_channel.swap_channel(new_channel)
+ self._invalidate_channel_stubs()
if grace_period:
- self._is_closed.wait(grace_period)
+ CrossSync._Sync_Impl.event_wait(
+ self._is_closed, grace_period, async_break_early=False
+ )
old_channel.close()
next_refresh = random.uniform(refresh_interval_min, refresh_interval_max)
next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0)
def _register_instance(
- self, instance_id: str, owner: _DataApiTarget | ExecuteQueryIterator
+ self, instance_id: str, app_profile_id: Optional[str], owner_id: int
) -> None:
"""Registers an instance with the client, and warms the channel for the instance
The client will periodically refresh grpc channel used to make
@@ -314,12 +383,14 @@ def _register_instance(
Args:
instance_id: id of the instance to register.
- owner: table that owns the instance. Owners will be tracked in
+ app_profile_id: id of the app profile calling the instance.
+ owner_id: integer id of the object owning the instance. Owners will be tracked in
_instance_owners, and instances will only be unregistered when all
- owners call _remove_instance_registration"""
+ owners call _remove_instance_registration. Can be obtained by calling
+            the built-in `id` identity function, using `id(owner)`"""
instance_name = self._gapic_client.instance_path(self.project, instance_id)
- instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id)
- self._instance_owners.setdefault(instance_key, set()).add(id(owner))
+ instance_key = _WarmedInstanceKey(instance_name, app_profile_id)
+ self._instance_owners.setdefault(instance_key, set()).add(owner_id)
if instance_key not in self._active_instances:
self._active_instances.add(instance_key)
if self._channel_refresh_task:
@@ -328,7 +399,7 @@ def _register_instance(
self._start_background_channel_refresh()
def _remove_instance_registration(
- self, instance_id: str, owner: _DataApiTarget | ExecuteQueryIterator
+ self, instance_id: str, app_profile_id: Optional[str], owner_id: int
) -> bool:
"""Removes an instance from the client's registered instances, to prevent
warming new channels for the instance
@@ -337,16 +408,16 @@ def _remove_instance_registration(
Args:
instance_id: id of the instance to remove
- owner: table that owns the instance. Owners will be tracked in
- _instance_owners, and instances will only be unregistered when all
- owners call _remove_instance_registration
+ app_profile_id: id of the app profile calling the instance.
+ owner_id: integer id of the object owning the instance. Can be
+                obtained by calling the built-in `id` identity function, using `id(owner)`.
Returns:
bool: True if instance was removed, else False"""
instance_name = self._gapic_client.instance_path(self.project, instance_id)
- instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id)
+ instance_key = _WarmedInstanceKey(instance_name, app_profile_id)
owner_list = self._instance_owners.get(instance_key, set())
try:
- owner_list.remove(id(owner))
+ owner_list.remove(owner_id)
if len(owner_list) == 0:
self._active_instances.remove(instance_key)
return True
@@ -461,6 +532,7 @@ def execute_query(
DeadlineExceeded,
ServiceUnavailable,
),
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
) -> "ExecuteQueryIterator":
"""Executes an SQL query on an instance.
Returns an iterator to asynchronously stream back columns from selected rows.
@@ -508,6 +580,62 @@ def execute_query(
If None, defaults to prepare_operation_timeout.
prepare_retryable_errors: a list of errors that will be retried if encountered during prepareQuery.
Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+ column_info: (Optional) A dictionary mapping column names to Protobuf message classes or EnumTypeWrapper objects.
+ This dictionary provides the necessary type information for deserializing PROTO and
+ ENUM column values from the query results. When an entry is provided
+ for a PROTO or ENUM column, the client library will attempt to deserialize the raw data.
+
+ - For PROTO columns: The value in the dictionary should be the
+ Protobuf Message class (e.g., ``my_pb2.MyMessage``).
+ - For ENUM columns: The value should be the Protobuf EnumTypeWrapper
+ object (e.g., ``my_pb2.MyEnum``).
+
+ Example::
+
+ import my_pb2
+
+ column_info = {
+ "my_proto_column": my_pb2.MyMessage,
+ "my_enum_column": my_pb2.MyEnum
+ }
+
+ If ``column_info`` is not provided, or if a specific column name is not found
+ in the dictionary:
+
+ - PROTO columns will be returned as raw bytes.
+ - ENUM columns will be returned as integers.
+
+ Note for Nested PROTO or ENUM Fields:
+
+ To specify types for PROTO or ENUM fields within STRUCTs or MAPs, use a dot-separated
+ path from the top-level column name.
+
+ - For STRUCTs: ``struct_column_name.field_name``
+ - For MAPs: ``map_column_name.key`` or ``map_column_name.value`` to specify types
+ for the map keys or values, respectively.
+
+ Example::
+
+ import my_pb2
+
+ column_info = {
+ # Top-level column
+ "my_proto_column": my_pb2.MyMessage,
+ "my_enum_column": my_pb2.MyEnum,
+
+ # Nested field in a STRUCT column named 'my_struct'
+ "my_struct.nested_proto_field": my_pb2.OtherMessage,
+ "my_struct.nested_enum_field": my_pb2.AnotherEnum,
+
+ # Nested field in a MAP column named 'my_map'
+ "my_map.key": my_pb2.MapKeyEnum, # If map keys were enums
+ "my_map.value": my_pb2.MapValueMessage,
+
+ # PROTO field inside a STRUCT, where the STRUCT is the value in a MAP column
+ "struct_map.value.nested_proto_field": my_pb2.DeeplyNestedProto,
+ "struct_map.value.nested_enum_field": my_pb2.DeeplyNestedEnum
+ }
+
Returns:
ExecuteQueryIterator: an asynchronous iterator that yields rows returned by the query
Raises:
@@ -517,6 +645,7 @@ def execute_query(
google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if
a parameter is passed without an explicit type, and the type cannot be infered
+ google.protobuf.message.DecodeError: raised if the deserialization of a PROTO/ENUM value fails.
"""
instance_name = self._gapic_client.instance_path(self.project, instance_id)
converted_param_types = _to_param_types(parameters, parameter_types)
@@ -568,6 +697,7 @@ def execute_query(
attempt_timeout,
operation_timeout,
retryable_excs=retryable_excs,
+ column_info=column_info,
)
def __enter__(self):
@@ -603,6 +733,7 @@ def __init__(
DeadlineExceeded,
ServiceUnavailable,
Aborted,
+ Cancelled,
),
default_mutate_rows_retryable_errors: Sequence[type[Exception]] = (
DeadlineExceeded,
@@ -670,27 +801,37 @@ def __init__(
self.table_name = self.client._gapic_client.table_path(
self.client.project, instance_id, table_id
)
- self.app_profile_id = app_profile_id
- self.default_operation_timeout = default_operation_timeout
- self.default_attempt_timeout = default_attempt_timeout
- self.default_read_rows_operation_timeout = default_read_rows_operation_timeout
- self.default_read_rows_attempt_timeout = default_read_rows_attempt_timeout
- self.default_mutate_rows_operation_timeout = (
+ self.app_profile_id: str | None = app_profile_id
+ self.default_operation_timeout: float = default_operation_timeout
+ self.default_attempt_timeout: float | None = default_attempt_timeout
+ self.default_read_rows_operation_timeout: float = (
+ default_read_rows_operation_timeout
+ )
+ self.default_read_rows_attempt_timeout: float | None = (
+ default_read_rows_attempt_timeout
+ )
+ self.default_mutate_rows_operation_timeout: float = (
default_mutate_rows_operation_timeout
)
- self.default_mutate_rows_attempt_timeout = default_mutate_rows_attempt_timeout
- self.default_read_rows_retryable_errors = (
+ self.default_mutate_rows_attempt_timeout: float | None = (
+ default_mutate_rows_attempt_timeout
+ )
+ self.default_read_rows_retryable_errors: Sequence[type[Exception]] = (
default_read_rows_retryable_errors or ()
)
- self.default_mutate_rows_retryable_errors = (
+ self.default_mutate_rows_retryable_errors: Sequence[type[Exception]] = (
default_mutate_rows_retryable_errors or ()
)
- self.default_retryable_errors = default_retryable_errors or ()
+ self.default_retryable_errors: Sequence[type[Exception]] = (
+ default_retryable_errors or ()
+ )
+ self._metrics = BigtableClientSideMetricsController()
try:
self._register_instance_future = CrossSync._Sync_Impl.create_task(
self.client._register_instance,
self.instance_id,
- self,
+ self.app_profile_id,
+ id(self),
sync_executor=self.client._executor,
)
except RuntimeError as e:
@@ -1342,9 +1483,12 @@ def read_modify_write_row(
def close(self):
"""Called to close the Table instance and release any resources held by it."""
+ self._metrics.close()
if self._register_instance_future:
self._register_instance_future.cancel()
- self.client._remove_instance_registration(self.instance_id, self)
+ self.client._remove_instance_registration(
+ self.instance_id, self.app_profile_id, id(self)
+ )
def __enter__(self):
"""Implement async context manager protocol
diff --git a/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py b/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py
new file mode 100644
index 000000000..c5a59787c
--- /dev/null
+++ b/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py
@@ -0,0 +1,126 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from __future__ import annotations
+from typing import Sequence
+import time
+from functools import wraps
+from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric
+from google.cloud.bigtable.data._metrics.data_model import OperationState
+from google.cloud.bigtable.data._metrics.data_model import OperationType
+from grpc import UnaryUnaryClientInterceptor
+from grpc import UnaryStreamClientInterceptor
+
+
+def _with_active_operation(func):
+ """Decorator for interceptor methods to extract the active operation associated with the
+ in-scope contextvars, and pass it to the decorated function."""
+
+ @wraps(func)
+ def wrapper(self, continuation, client_call_details, request):
+ operation: ActiveOperationMetric | None = ActiveOperationMetric.from_context()
+ if operation:
+ if (
+ operation.state == OperationState.CREATED
+ or operation.state == OperationState.BETWEEN_ATTEMPTS
+ ):
+ operation.start_attempt()
+ return func(self, operation, continuation, client_call_details, request)
+ else:
+ return continuation(client_call_details, request)
+
+ return wrapper
+
+
+def _get_metadata(source) -> dict[str, str | bytes] | None:
+ """Helper to extract metadata from a call or RpcError"""
+ try:
+ metadata: Sequence[tuple[str, str | bytes]]
+ metadata = source.trailing_metadata() + source.initial_metadata()
+ return {k: v for (k, v) in metadata}
+ except Exception:
+ return None
+
+
+class BigtableMetricsInterceptor(
+ UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor
+):
+ """
+    A gRPC interceptor that starts attempt tracking and records server response
+    metadata and latency information on the active operation metric.
+ """
+
+ @_with_active_operation
+ def intercept_unary_unary(
+ self, operation, continuation, client_call_details, request
+ ):
+ """Interceptor for unary rpcs:
+ - MutateRow
+ - CheckAndMutateRow
+ - ReadModifyWriteRow"""
+ metadata = None
+ try:
+ call = continuation(client_call_details, request)
+ metadata = _get_metadata(call)
+ return call
+ except Exception as rpc_error:
+ metadata = _get_metadata(rpc_error)
+ raise rpc_error
+ finally:
+ if metadata is not None:
+ operation.add_response_metadata(metadata)
+
+ @_with_active_operation
+ def intercept_unary_stream(
+ self, operation, continuation, client_call_details, request
+ ):
+ """Interceptor for streaming rpcs:
+ - ReadRows
+ - MutateRows
+ - SampleRowKeys"""
+ try:
+ return self._streaming_generator_wrapper(
+ operation, continuation(client_call_details, request)
+ )
+ except Exception as rpc_error:
+ metadata = _get_metadata(rpc_error)
+ if metadata is not None:
+ operation.add_response_metadata(metadata)
+ raise rpc_error
+
+ @staticmethod
+ def _streaming_generator_wrapper(operation, call):
+ """Wrapped generator to be returned by intercept_unary_stream."""
+ has_first_response = (
+ operation.first_response_latency_ns is not None
+ or operation.op_type != OperationType.READ_ROWS
+ )
+ encountered_exc = None
+ try:
+ for response in call:
+ if not has_first_response:
+ operation.first_response_latency_ns = (
+ time.monotonic_ns() - operation.start_time_ns
+ )
+ has_first_response = True
+ yield response
+ except Exception as e:
+ encountered_exc = e
+ raise
+ finally:
+ if call is not None:
+ metadata = _get_metadata(encountered_exc or call)
+ if metadata is not None:
+ operation.add_response_metadata(metadata)
diff --git a/google/cloud/bigtable/data/exceptions.py b/google/cloud/bigtable/data/exceptions.py
index 54ca30853..b19e0e5ea 100644
--- a/google/cloud/bigtable/data/exceptions.py
+++ b/google/cloud/bigtable/data/exceptions.py
@@ -90,7 +90,7 @@ def __init__(self, message, excs):
# apply index header
if idx != 0:
message_parts.append(
- f"+---------------- {str(idx+1).rjust(2)} ----------------"
+ f"+---------------- {str(idx + 1).rjust(2)} ----------------"
)
cause = e.__cause__
# if this exception was had a cause, print the cause first
@@ -331,6 +331,9 @@ def __init__(
class InvalidExecuteQueryResponse(core_exceptions.GoogleAPICallError):
"""Exception raised to invalid query response data from back-end."""
+ # Set to internal. This is representative of an internal error.
+ code = 13
+
class ParameterTypeInferenceFailed(ValueError):
"""Exception raised when query parameter types were not provided and cannot be inferred."""
diff --git a/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py
index d3ca890b4..2beda4cd6 100644
--- a/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py
+++ b/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py
@@ -23,6 +23,8 @@
TYPE_CHECKING,
)
from google.api_core import retry as retries
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor
from google.cloud.bigtable.data._helpers import (
@@ -87,6 +89,7 @@ def __init__(
operation_timeout: float,
req_metadata: Sequence[Tuple[str, str]] = (),
retryable_excs: Sequence[type[Exception]] = (),
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
) -> None:
"""
Collects responses from ExecuteQuery requests and parses them into QueryResultRows.
@@ -107,6 +110,8 @@ def __init__(
Failed requests will be retried within the budget
req_metadata: metadata used while sending the gRPC request
retryable_excs: a list of errors that will be retried if encountered.
+ column_info: dict with mappings between column names and additional column information
+ for protobuf deserialization.
Raises:
{NO_LOOP}
:class:`ValueError ` as a safeguard if data is processed in an unexpected state
@@ -115,13 +120,14 @@ def __init__(
self._app_profile_id = app_profile_id
self._client = client
self._instance_id = instance_id
- self._prepare_metadata = prepare_metadata
- self._final_metadata = None
+ self._prepare_metadata: Metadata = prepare_metadata
+ self._final_metadata: Metadata | None = None
self._byte_cursor = _ByteCursor()
self._reader: _Reader[QueryResultRow] = _QueryResultRowReader()
self.has_received_token = False
self._result_generator = self._next_impl()
self._register_instance_task = None
+ self._fully_consumed = False
self._is_closed = False
self._request_body = request_body
self._attempt_timeout_gen = _attempt_timeout_generator(
@@ -135,11 +141,13 @@ def __init__(
exception_factory=_retry_exception_factory,
)
self._req_metadata = req_metadata
+ self._column_info = column_info
try:
self._register_instance_task = CrossSync.create_task(
self._client._register_instance,
self._instance_id,
- self,
+ self.app_profile_id,
+ id(self),
sync_executor=self._client._executor,
)
except RuntimeError as e:
@@ -187,37 +195,42 @@ async def _next_impl(self) -> CrossSync.Iterator[QueryResultRow]:
Generator wrapping the response stream which parses the stream results
and returns full `QueryResultRow`s.
"""
- async for response in self._stream:
- try:
- # we've received a resume token, so we can finalize the metadata
- if self._final_metadata is None and _has_resume_token(response):
- self._finalize_metadata()
-
- batches_to_parse = self._byte_cursor.consume(response)
- if not batches_to_parse:
- continue
- # metadata must be set at this point since there must be a resume_token
- # for byte_cursor to yield data
- if not self.metadata:
- raise ValueError(
- "Error parsing response before finalizing metadata"
+ try:
+ async for response in self._stream:
+ try:
+ # we've received a resume token, so we can finalize the metadata
+ if self._final_metadata is None and _has_resume_token(response):
+ self._finalize_metadata()
+
+ batches_to_parse = self._byte_cursor.consume(response)
+ if not batches_to_parse:
+ continue
+ # metadata must be set at this point since there must be a resume_token
+ # for byte_cursor to yield data
+ if not self.metadata:
+ raise ValueError(
+ "Error parsing response before finalizing metadata"
+ )
+ results = self._reader.consume(
+ batches_to_parse, self.metadata, self._column_info
)
- results = self._reader.consume(batches_to_parse, self.metadata)
- if results is None:
- continue
-
- except ValueError as e:
- raise InvalidExecuteQueryResponse(
- "Invalid ExecuteQuery response received"
- ) from e
-
- for result in results:
- yield result
- # this means the stream has finished with no responses. In that case we know the
- # latest_prepare_reponses was used successfully so we can finalize the metadata
- if self._final_metadata is None:
- self._finalize_metadata()
- await self.close()
+ if results is None:
+ continue
+
+ except ValueError as e:
+ raise InvalidExecuteQueryResponse(
+ "Invalid ExecuteQuery response received"
+ ) from e
+
+ for result in results:
+ yield result
+ # this means the stream has finished with no responses. In that case we know the
+            # latest prepare response was used successfully so we can finalize the metadata
+ if self._final_metadata is None:
+ self._finalize_metadata()
+ self._fully_consumed = True
+ finally:
+ self._close_internal()
@CrossSync.convert(sync_name="__next__", replace_symbols={"__anext__": "__next__"})
async def __anext__(self) -> QueryResultRow:
@@ -277,15 +290,26 @@ def metadata(self) -> Metadata:
@CrossSync.convert
async def close(self) -> None:
"""
- Cancel all background tasks. Should be called all rows were processed.
+ Cancel all background tasks. Should be called after all rows were processed.
+
+ Called automatically by iterator
:raises: :class:`ValueError ` if called in an invalid state
"""
+ # this doesn't need to be async anymore but we wrap the sync api to avoid a breaking
+ # change
+ self._close_internal()
+
+ def _close_internal(self) -> None:
if self._is_closed:
return
- if not self._byte_cursor.empty():
+ # Throw an error if the iterator has been successfully consumed but there is
+ # still buffered data
+ if self._fully_consumed and not self._byte_cursor.empty():
raise ValueError("Unexpected buffered data at end of executeQuery reqest")
self._is_closed = True
if self._register_instance_task is not None:
self._register_instance_task.cancel()
- await self._client._remove_instance_registration(self._instance_id, self)
+ self._client._remove_instance_registration(
+ self._instance_id, self.app_profile_id, id(self)
+ )
diff --git a/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py b/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py
index 4cb5db291..a43539e55 100644
--- a/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py
+++ b/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py
@@ -11,8 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import annotations
-from typing import Any, Callable, Dict, Type
+from typing import Any, Callable, Dict, Type, Optional, Union
+
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
from google.cloud.bigtable.data.execute_query.values import Struct
from google.cloud.bigtable.data.execute_query.metadata import SqlType
from google.cloud.bigtable_v2 import Value as PBValue
@@ -30,24 +34,36 @@
SqlType.Struct: "array_value",
SqlType.Array: "array_value",
SqlType.Map: "array_value",
+ SqlType.Proto: "bytes_value",
+ SqlType.Enum: "int_value",
}
-def _parse_array_type(value: PBValue, metadata_type: SqlType.Array) -> Any:
+def _parse_array_type(
+ value: PBValue,
+ metadata_type: SqlType.Array,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> list[Any]:
"""
used for parsing an array represented as a protobuf to a python list.
"""
return list(
map(
lambda val: _parse_pb_value_to_python_value(
- val, metadata_type.element_type
+ val, metadata_type.element_type, column_name, column_info
),
value.array_value.values,
)
)
-def _parse_map_type(value: PBValue, metadata_type: SqlType.Map) -> Any:
+def _parse_map_type(
+ value: PBValue,
+ metadata_type: SqlType.Map,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> dict[Any, Any]:
"""
used for parsing a map represented as a protobuf to a python dict.
@@ -64,10 +80,16 @@ def _parse_map_type(value: PBValue, metadata_type: SqlType.Map) -> Any:
map(
lambda map_entry: (
_parse_pb_value_to_python_value(
- map_entry.array_value.values[0], metadata_type.key_type
+ map_entry.array_value.values[0],
+ metadata_type.key_type,
+ f"{column_name}.key" if column_name is not None else None,
+ column_info,
),
_parse_pb_value_to_python_value(
- map_entry.array_value.values[1], metadata_type.value_type
+ map_entry.array_value.values[1],
+ metadata_type.value_type,
+ f"{column_name}.value" if column_name is not None else None,
+ column_info,
),
),
value.array_value.values,
@@ -77,7 +99,12 @@ def _parse_map_type(value: PBValue, metadata_type: SqlType.Map) -> Any:
raise ValueError("Invalid map entry - less or more than two values.")
-def _parse_struct_type(value: PBValue, metadata_type: SqlType.Struct) -> Struct:
+def _parse_struct_type(
+ value: PBValue,
+ metadata_type: SqlType.Struct,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> Struct:
"""
used for parsing a struct represented as a protobuf to a
google.cloud.bigtable.data.execute_query.Struct
@@ -88,13 +115,27 @@ def _parse_struct_type(value: PBValue, metadata_type: SqlType.Struct) -> Struct:
struct = Struct()
for value, field in zip(value.array_value.values, metadata_type.fields):
field_name, field_type = field
- struct.add_field(field_name, _parse_pb_value_to_python_value(value, field_type))
+ nested_column_name: str | None
+ if column_name and field_name:
+ # qualify the column name for nested lookups
+ nested_column_name = f"{column_name}.{field_name}"
+ else:
+ nested_column_name = None
+ struct.add_field(
+ field_name,
+ _parse_pb_value_to_python_value(
+ value, field_type, nested_column_name, column_info
+ ),
+ )
return struct
def _parse_timestamp_type(
- value: PBValue, metadata_type: SqlType.Timestamp
+ value: PBValue,
+ metadata_type: SqlType.Timestamp,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
) -> DatetimeWithNanoseconds:
"""
used for parsing a timestamp represented as a protobuf to DatetimeWithNanoseconds
@@ -102,15 +143,105 @@ def _parse_timestamp_type(
return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value)
-_TYPE_PARSERS: Dict[Type[SqlType.Type], Callable[[PBValue, Any], Any]] = {
+def _parse_proto_type(
+ value: PBValue,
+ metadata_type: SqlType.Proto,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> Message | bytes:
+ """
+ Parses a serialized protobuf message into a Message object using type information
+ provided in column_info.
+
+ Args:
+ value: The value to parse, expected to have a bytes_value attribute.
+ metadata_type: The expected SQL type (Proto).
+ column_name: The name of the column.
+ column_info: (Optional) A dictionary mapping column names to their
+ corresponding Protobuf Message classes. This information is used
+ to deserialize the raw bytes.
+
+ Returns:
+ A deserialized Protobuf Message object if parsing is successful.
+ If the required type information is not found in column_info, the function
+ returns the original serialized data as bytes (value.bytes_value).
+ This fallback ensures that the raw data is still accessible.
+
+ Raises:
+ google.protobuf.message.DecodeError: If `value.bytes_value` cannot be
+ parsed as the Message type specified in `column_info`.
+ """
+ if (
+ column_name is not None
+ and column_info is not None
+ and column_info.get(column_name) is not None
+ ):
+ default_proto_message = column_info.get(column_name)
+ if isinstance(default_proto_message, Message):
+ proto_message = type(default_proto_message)()
+ proto_message.ParseFromString(value.bytes_value)
+ return proto_message
+ return value.bytes_value
+
+
+def _parse_enum_type(
+ value: PBValue,
+ metadata_type: SqlType.Enum,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> int | str:
+ """
+ Parses an integer value into a Protobuf enum name string using type information
+ provided in column_info.
+
+ Args:
+ value: The value to parse, expected to have an int_value attribute.
+ metadata_type: The expected SQL type (Enum).
+ column_name: The name of the column.
+ column_info: (Optional) A dictionary mapping column names to their
+ corresponding Protobuf EnumTypeWrapper objects. This information
+ is used to convert the integer to an enum name.
+
+ Returns:
+ A string representing the name of the enum value if conversion is successful.
+ If conversion fails for any reason, such as the required EnumTypeWrapper
+ not being found in column_info, or if an error occurs during the name lookup
+ (e.g., the integer is not a valid enum value), the function returns the
+ original integer value (value.int_value). This fallback ensures the
+ raw integer representation is still accessible.
+ """
+ if (
+ column_name is not None
+ and column_info is not None
+ and column_info.get(column_name) is not None
+ ):
+ proto_enum = column_info.get(column_name)
+ if isinstance(proto_enum, EnumTypeWrapper):
+ return proto_enum.Name(value.int_value)
+ return value.int_value
+
+
+ParserCallable = Callable[
+ [PBValue, Any, Optional[str], Optional[Dict[str, Union[Message, EnumTypeWrapper]]]],
+ Any,
+]
+
+_TYPE_PARSERS: Dict[Type[SqlType.Type], ParserCallable] = {
SqlType.Timestamp: _parse_timestamp_type,
SqlType.Struct: _parse_struct_type,
SqlType.Array: _parse_array_type,
SqlType.Map: _parse_map_type,
+ SqlType.Proto: _parse_proto_type,
+ SqlType.Enum: _parse_enum_type,
}
-def _parse_pb_value_to_python_value(value: PBValue, metadata_type: SqlType.Type) -> Any:
+def _parse_pb_value_to_python_value(
+ value: PBValue,
+ metadata_type: SqlType.Type,
+ column_name: str | None,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> Any:
"""
used for converting the value represented as a protobufs to a python object.
"""
@@ -126,7 +257,7 @@ def _parse_pb_value_to_python_value(value: PBValue, metadata_type: SqlType.Type)
if kind in _TYPE_PARSERS:
parser = _TYPE_PARSERS[kind]
- return parser(value, metadata_type)
+ return parser(value, metadata_type, column_name, column_info)
elif kind in _REQUIRED_PROTO_FIELDS:
field_name = _REQUIRED_PROTO_FIELDS[kind]
return getattr(value, field_name)
diff --git a/google/cloud/bigtable/data/execute_query/_reader.py b/google/cloud/bigtable/data/execute_query/_reader.py
index d9507fe35..467c2030f 100644
--- a/google/cloud/bigtable/data/execute_query/_reader.py
+++ b/google/cloud/bigtable/data/execute_query/_reader.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import annotations
from typing import (
List,
@@ -21,6 +22,8 @@
Sequence,
)
from abc import ABC, abstractmethod
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
from google.cloud.bigtable_v2 import ProtoRows, Value as PBValue
@@ -54,7 +57,10 @@ class _Reader(ABC, Generic[T]):
@abstractmethod
def consume(
- self, batches_to_consume: List[bytes], metadata: Metadata
+ self,
+ batches_to_consume: List[bytes],
+ metadata: Metadata,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
) -> Optional[Iterable[T]]:
"""This method receives a list of batches of bytes to be parsed as ProtoRows messages.
It then uses the metadata to group the values in the parsed messages into rows. Returns
@@ -64,6 +70,8 @@ def consume(
:meth:`google.cloud.bigtable.byte_cursor._ByteCursor.consume`
method.
metadata: metadata used to transform values to rows
+ column_info: (Optional) dict with mappings between column names and additional column information
+ for protobuf deserialization.
Returns:
Iterable[T] or None: Iterable if gathered values can form one or more instances of T,
@@ -89,7 +97,10 @@ def _parse_proto_rows(self, bytes_to_parse: bytes) -> Iterable[PBValue]:
return proto_rows.values
def _construct_query_result_row(
- self, values: Sequence[PBValue], metadata: Metadata
+ self,
+ values: Sequence[PBValue],
+ metadata: Metadata,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
) -> QueryResultRow:
result = QueryResultRow()
columns = metadata.columns
@@ -99,12 +110,17 @@ def _construct_query_result_row(
), "This function should be called only when count of values matches count of columns."
for column, value in zip(columns, values):
- parsed_value = _parse_pb_value_to_python_value(value, column.column_type)
+ parsed_value = _parse_pb_value_to_python_value(
+ value, column.column_type, column.column_name, column_info
+ )
result.add_field(column.column_name, parsed_value)
return result
def consume(
- self, batches_to_consume: List[bytes], metadata: Metadata
+ self,
+ batches_to_consume: List[bytes],
+ metadata: Metadata,
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
) -> Optional[Iterable[QueryResultRow]]:
num_columns = len(metadata.columns)
rows = []
@@ -112,7 +128,11 @@ def consume(
values = self._parse_proto_rows(batch_bytes)
for row_data in batched(values, n=num_columns):
if len(row_data) == num_columns:
- rows.append(self._construct_query_result_row(row_data, metadata))
+ rows.append(
+ self._construct_query_result_row(
+ row_data, metadata, column_info
+ )
+ )
else:
raise ValueError(
"Unexpected error, recieved bad number of values. "
diff --git a/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py b/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py
index 9c2d1c6d8..68594d0e8 100644
--- a/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py
+++ b/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py
@@ -18,6 +18,8 @@
from __future__ import annotations
from typing import Any, Dict, Optional, Sequence, Tuple, TYPE_CHECKING
from google.api_core import retry as retries
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor
from google.cloud.bigtable.data._helpers import (
_attempt_timeout_generator,
@@ -63,6 +65,7 @@ def __init__(
operation_timeout: float,
req_metadata: Sequence[Tuple[str, str]] = (),
retryable_excs: Sequence[type[Exception]] = (),
+ column_info: dict[str, Message | EnumTypeWrapper] | None = None,
) -> None:
"""Collects responses from ExecuteQuery requests and parses them into QueryResultRows.
@@ -82,6 +85,8 @@ def __init__(
Failed requests will be retried within the budget
req_metadata: metadata used while sending the gRPC request
retryable_excs: a list of errors that will be retried if encountered.
+ column_info: dict with mappings between column names and additional column information
+ for protobuf deserialization.
Raises:
None
         :class:`ValueError` as a safeguard if data is processed in an unexpected state
@@ -90,13 +95,14 @@ def __init__(
self._app_profile_id = app_profile_id
self._client = client
self._instance_id = instance_id
- self._prepare_metadata = prepare_metadata
- self._final_metadata = None
+ self._prepare_metadata: Metadata = prepare_metadata
+ self._final_metadata: Metadata | None = None
self._byte_cursor = _ByteCursor()
self._reader: _Reader[QueryResultRow] = _QueryResultRowReader()
self.has_received_token = False
self._result_generator = self._next_impl()
self._register_instance_task = None
+ self._fully_consumed = False
self._is_closed = False
self._request_body = request_body
self._attempt_timeout_gen = _attempt_timeout_generator(
@@ -110,11 +116,13 @@ def __init__(
exception_factory=_retry_exception_factory,
)
self._req_metadata = req_metadata
+ self._column_info = column_info
try:
self._register_instance_task = CrossSync._Sync_Impl.create_task(
self._client._register_instance,
self._instance_id,
- self,
+ self.app_profile_id,
+ id(self),
sync_executor=self._client._executor,
)
except RuntimeError as e:
@@ -153,29 +161,34 @@ def _make_request_with_resume_token(self):
def _next_impl(self) -> CrossSync._Sync_Impl.Iterator[QueryResultRow]:
"""Generator wrapping the response stream which parses the stream results
and returns full `QueryResultRow`s."""
- for response in self._stream:
- try:
- if self._final_metadata is None and _has_resume_token(response):
- self._finalize_metadata()
- batches_to_parse = self._byte_cursor.consume(response)
- if not batches_to_parse:
- continue
- if not self.metadata:
- raise ValueError(
- "Error parsing response before finalizing metadata"
+ try:
+ for response in self._stream:
+ try:
+ if self._final_metadata is None and _has_resume_token(response):
+ self._finalize_metadata()
+ batches_to_parse = self._byte_cursor.consume(response)
+ if not batches_to_parse:
+ continue
+ if not self.metadata:
+ raise ValueError(
+ "Error parsing response before finalizing metadata"
+ )
+ results = self._reader.consume(
+ batches_to_parse, self.metadata, self._column_info
)
- results = self._reader.consume(batches_to_parse, self.metadata)
- if results is None:
- continue
- except ValueError as e:
- raise InvalidExecuteQueryResponse(
- "Invalid ExecuteQuery response received"
- ) from e
- for result in results:
- yield result
- if self._final_metadata is None:
- self._finalize_metadata()
- self.close()
+ if results is None:
+ continue
+ except ValueError as e:
+ raise InvalidExecuteQueryResponse(
+ "Invalid ExecuteQuery response received"
+ ) from e
+ for result in results:
+ yield result
+ if self._final_metadata is None:
+ self._finalize_metadata()
+ self._fully_consumed = True
+ finally:
+ self._close_internal()
def __next__(self) -> QueryResultRow:
"""Yields QueryResultRows representing the results of the query.
@@ -225,15 +238,22 @@ def metadata(self) -> Metadata:
return self._final_metadata
def close(self) -> None:
- """Cancel all background tasks. Should be called all rows were processed.
+ """Cancel all background tasks. Should be called after all rows were processed.
+
+        Called automatically by the iterator.
         :raises: :class:`ValueError` if called in an invalid state
"""
+ self._close_internal()
+
+ def _close_internal(self) -> None:
if self._is_closed:
return
- if not self._byte_cursor.empty():
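+        # Leftover buffered data is only treated as an error when the stream was
+        # fully consumed; closing early (e.g. breaking out of iteration) is allowed.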
+ if self._fully_consumed and (not self._byte_cursor.empty()):
raise ValueError("Unexpected buffered data at end of executeQuery reqest")
self._is_closed = True
if self._register_instance_task is not None:
self._register_instance_task.cancel()
- self._client._remove_instance_registration(self._instance_id, self)
+ self._client._remove_instance_registration(
+ self._instance_id, self.app_profile_id, id(self)
+ )
diff --git a/google/cloud/bigtable/data/execute_query/metadata.py b/google/cloud/bigtable/data/execute_query/metadata.py
index 2fd66947d..74b6cb836 100644
--- a/google/cloud/bigtable/data/execute_query/metadata.py
+++ b/google/cloud/bigtable/data/execute_query/metadata.py
@@ -296,6 +296,28 @@ def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]:
)
}
+ class Proto(Type):
+ """Proto SQL type."""
+
+ type_field_name = "proto_type"
+
+ def _to_value_pb_dict(self, value: Any):
+ raise NotImplementedError("Proto is not supported as a query parameter")
+
+ def _to_type_pb_dict(self) -> Dict[str, Any]:
+ raise NotImplementedError("Proto is not supported as a query parameter")
+
+ class Enum(Type):
+ """Enum SQL type."""
+
+ type_field_name = "enum_type"
+
+ def _to_value_pb_dict(self, value: Any):
+ raise NotImplementedError("Enum is not supported as a query parameter")
+
+ def _to_type_pb_dict(self) -> Dict[str, Any]:
+ raise NotImplementedError("Enum is not supported as a query parameter")
+
class Metadata:
"""
@@ -388,6 +410,8 @@ def _pb_metadata_to_metadata_types(
"bool_type": SqlType.Bool,
"timestamp_type": SqlType.Timestamp,
"date_type": SqlType.Date,
+ "proto_type": SqlType.Proto,
+ "enum_type": SqlType.Enum,
"struct_type": SqlType.Struct,
"array_type": SqlType.Array,
"map_type": SqlType.Map,
diff --git a/google/cloud/bigtable/data/mutations.py b/google/cloud/bigtable/data/mutations.py
index 2f4e441ed..f19b1e49e 100644
--- a/google/cloud/bigtable/data/mutations.py
+++ b/google/cloud/bigtable/data/mutations.py
@@ -123,6 +123,14 @@ def _from_dict(cls, input_dict: dict[str, Any]) -> Mutation:
instance = DeleteAllFromFamily(details["family_name"])
elif "delete_from_row" in input_dict:
instance = DeleteAllFromRow()
+ elif "add_to_cell" in input_dict:
+ details = input_dict["add_to_cell"]
+ instance = AddToCell(
+ details["family_name"],
+ details["column_qualifier"]["raw_value"],
+ details["input"]["int_value"],
+ details["timestamp"]["raw_timestamp_micros"],
+ )
except KeyError as e:
raise ValueError("Invalid mutation dictionary") from e
if instance is None:
@@ -276,6 +284,75 @@ def _to_dict(self) -> dict[str, Any]:
}
+@dataclass
+class AddToCell(Mutation):
+ """
+ Adds an int64 value to an aggregate cell. The column family must be an
+ aggregate family and have an "int64" input type or this mutation will be
+ rejected.
+
+ Note: The timestamp values are in microseconds but must match the
+ granularity of the table (defaults to `MILLIS`). Therefore, the given value
+ must be a multiple of 1000 (millisecond granularity). For example:
+ `1571902339435000`.
+
+ Args:
+ family: The name of the column family to which the cell belongs.
+ qualifier: The column qualifier of the cell.
+ value: The value to be accumulated into the cell.
+ timestamp_micros: The timestamp of the cell. Must be provided for
+ cell aggregation to work correctly.
+
+
+ Raises:
+ TypeError: If `qualifier` is not `bytes` or `str`.
+ TypeError: If `value` is not `int`.
+ TypeError: If `timestamp_micros` is not `int`.
+ ValueError: If `value` is out of bounds for a 64-bit signed int.
+ ValueError: If `timestamp_micros` is less than 0.
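+
+    Example (illustrative sketch; the family, qualifier, and value below are
+    hypothetical)::
+
+        mutation = AddToCell(
+            family="stats",
+            qualifier=b"views",
+            value=1,
+            timestamp_micros=1571902339435000,
+        )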
+ """
+
+ def __init__(
+ self,
+ family: str,
+ qualifier: bytes | str,
+ value: int,
+ timestamp_micros: int,
+ ):
+ qualifier = qualifier.encode() if isinstance(qualifier, str) else qualifier
+ if not isinstance(qualifier, bytes):
+ raise TypeError("qualifier must be bytes or str")
+ if not isinstance(value, int):
+ raise TypeError("value must be int")
+ if not isinstance(timestamp_micros, int):
+ raise TypeError("timestamp_micros must be int")
+ if abs(value) > _MAX_INCREMENT_VALUE:
+ raise ValueError(
+ "int values must be between -2**63 and 2**63 (64-bit signed int)"
+ )
+
+ if timestamp_micros < 0:
+ raise ValueError("timestamp must be non-negative")
+
+ self.family = family
+ self.qualifier = qualifier
+ self.value = value
+ self.timestamp = timestamp_micros
+
+ def _to_dict(self) -> dict[str, Any]:
+ return {
+ "add_to_cell": {
+ "family_name": self.family,
+ "column_qualifier": {"raw_value": self.qualifier},
+ "timestamp": {"raw_timestamp_micros": self.timestamp},
+ "input": {"int_value": self.value},
+ }
+ }
+
+ def is_idempotent(self) -> bool:
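+        # Aggregate mutations are not idempotent: retrying a successful
+        # AddToCell would accumulate the value a second time.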
+ return False
+
+
class RowMutationEntry:
"""
A single entry in a `MutateRows` request.
diff --git a/google/cloud/bigtable/data/row.py b/google/cloud/bigtable/data/row.py
index a5575b83a..50e65a958 100644
--- a/google/cloud/bigtable/data/row.py
+++ b/google/cloud/bigtable/data/row.py
@@ -190,7 +190,7 @@ def __str__(self) -> str:
elif len(cell_list) == 1:
line.append(f"[{cell_list[0]}],")
else:
- line.append(f"[{cell_list[0]}, (+{len(cell_list)-1} more)],")
+ line.append(f"[{cell_list[0]}, (+{len(cell_list) - 1} more)],")
output.append("".join(line))
output.append("}")
return "\n".join(output)
diff --git a/google/cloud/bigtable/gapic_version.py b/google/cloud/bigtable/gapic_version.py
index 8ab09c42e..a105a8349 100644
--- a/google/cloud/bigtable/gapic_version.py
+++ b/google/cloud/bigtable/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.31.0" # {x-release-please-version}
+__version__ = "2.35.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py
index 7429bd36f..0009f287e 100644
--- a/google/cloud/bigtable/table.py
+++ b/google/cloud/bigtable/table.py
@@ -47,7 +47,7 @@
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable import enums
from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2
-from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
+from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient
from google.cloud.bigtable_admin_v2.types import table as admin_messages_v2_pb2
from google.cloud.bigtable_admin_v2.types import (
bigtable_table_admin as table_admin_messages_v2_pb2,
@@ -990,7 +990,7 @@ def list_backups(self, cluster_id=None, filter_=None, order_by=None, page_size=0
if filter_:
backups_filter = "({}) AND ({})".format(backups_filter, filter_)
- parent = BigtableTableAdminClient.cluster_path(
+ parent = BaseBigtableTableAdminClient.cluster_path(
project=self._instance._client.project,
instance=self._instance.instance_id,
cluster=cluster_id,
@@ -1037,7 +1037,7 @@ def restore(self, new_table_id, cluster_id=None, backup_id=None, backup_name=Non
and `backup_id` parameters even of such specified.
:return: An instance of
- :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture`.
+ :class:`~google.api_core.operation.Operation`.
:raises: google.api_core.exceptions.AlreadyExists: If the table
already exists.
@@ -1049,13 +1049,13 @@ def restore(self, new_table_id, cluster_id=None, backup_id=None, backup_name=Non
"""
api = self._instance._client.table_admin_client
if not backup_name:
- backup_name = BigtableTableAdminClient.backup_path(
+ backup_name = BaseBigtableTableAdminClient.backup_path(
project=self._instance._client.project,
instance=self._instance.instance_id,
cluster=cluster_id,
backup=backup_id,
)
- return api.restore_table(
+ return api._restore_table(
request={
"parent": self._instance.name,
"table_id": new_table_id,
diff --git a/google/cloud/bigtable_admin/__init__.py b/google/cloud/bigtable_admin/__init__.py
index c8f2a4482..2d95b06c8 100644
--- a/google/cloud/bigtable_admin/__init__.py
+++ b/google/cloud/bigtable_admin/__init__.py
@@ -25,10 +25,10 @@
BigtableInstanceAdminAsyncClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.client import (
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.async_client import (
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
)
from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import (
@@ -177,6 +177,12 @@
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
CreateBackupRequest,
)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CreateSchemaBundleMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ CreateSchemaBundleRequest,
+)
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
CreateTableFromSnapshotMetadata,
)
@@ -193,6 +199,9 @@
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
DeleteBackupRequest,
)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ DeleteSchemaBundleRequest,
+)
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
DeleteSnapshotRequest,
)
@@ -210,6 +219,9 @@
GetAuthorizedViewRequest,
)
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetBackupRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ GetSchemaBundleRequest,
+)
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetSnapshotRequest
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetTableRequest
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
@@ -222,6 +234,12 @@
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
ListBackupsResponse,
)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ ListSchemaBundlesRequest,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ ListSchemaBundlesResponse,
+)
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
ListSnapshotsRequest,
)
@@ -266,6 +284,12 @@
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
UpdateBackupRequest,
)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ UpdateSchemaBundleMetadata,
+)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
+ UpdateSchemaBundleRequest,
+)
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
UpdateTableMetadata,
)
@@ -287,17 +311,21 @@
from google.cloud.bigtable_admin_v2.types.table import ColumnFamily
from google.cloud.bigtable_admin_v2.types.table import EncryptionInfo
from google.cloud.bigtable_admin_v2.types.table import GcRule
+from google.cloud.bigtable_admin_v2.types.table import ProtoSchema
from google.cloud.bigtable_admin_v2.types.table import RestoreInfo
+from google.cloud.bigtable_admin_v2.types.table import SchemaBundle
from google.cloud.bigtable_admin_v2.types.table import Snapshot
from google.cloud.bigtable_admin_v2.types.table import Table
+from google.cloud.bigtable_admin_v2.types.table import TieredStorageConfig
+from google.cloud.bigtable_admin_v2.types.table import TieredStorageRule
from google.cloud.bigtable_admin_v2.types.table import RestoreSourceType
from google.cloud.bigtable_admin_v2.types.types import Type
__all__ = (
"BigtableInstanceAdminClient",
"BigtableInstanceAdminAsyncClient",
- "BigtableTableAdminClient",
- "BigtableTableAdminAsyncClient",
+ "BaseBigtableTableAdminClient",
+ "BaseBigtableTableAdminAsyncClient",
"CreateAppProfileRequest",
"CreateClusterMetadata",
"CreateClusterRequest",
@@ -348,12 +376,15 @@
"CreateAuthorizedViewRequest",
"CreateBackupMetadata",
"CreateBackupRequest",
+ "CreateSchemaBundleMetadata",
+ "CreateSchemaBundleRequest",
"CreateTableFromSnapshotMetadata",
"CreateTableFromSnapshotRequest",
"CreateTableRequest",
"DataBoostReadLocalWrites",
"DeleteAuthorizedViewRequest",
"DeleteBackupRequest",
+ "DeleteSchemaBundleRequest",
"DeleteSnapshotRequest",
"DeleteTableRequest",
"DropRowRangeRequest",
@@ -361,12 +392,15 @@
"GenerateConsistencyTokenResponse",
"GetAuthorizedViewRequest",
"GetBackupRequest",
+ "GetSchemaBundleRequest",
"GetSnapshotRequest",
"GetTableRequest",
"ListAuthorizedViewsRequest",
"ListAuthorizedViewsResponse",
"ListBackupsRequest",
"ListBackupsResponse",
+ "ListSchemaBundlesRequest",
+ "ListSchemaBundlesResponse",
"ListSnapshotsRequest",
"ListSnapshotsResponse",
"ListTablesRequest",
@@ -383,6 +417,8 @@
"UpdateAuthorizedViewMetadata",
"UpdateAuthorizedViewRequest",
"UpdateBackupRequest",
+ "UpdateSchemaBundleMetadata",
+ "UpdateSchemaBundleRequest",
"UpdateTableMetadata",
"UpdateTableRequest",
"OperationProgress",
@@ -402,9 +438,18 @@
"ColumnFamily",
"EncryptionInfo",
"GcRule",
+ "ProtoSchema",
"RestoreInfo",
+ "SchemaBundle",
"Snapshot",
"Table",
+ "TieredStorageConfig",
+ "TieredStorageRule",
"RestoreSourceType",
"Type",
)
+
+import google.cloud.bigtable_admin_v2.overlay # noqa: F401
+from google.cloud.bigtable_admin_v2.overlay import * # noqa: F401, F403
+
+__all__ += google.cloud.bigtable_admin_v2.overlay.__all__
diff --git a/google/cloud/bigtable_admin/gapic_version.py b/google/cloud/bigtable_admin/gapic_version.py
index 8ab09c42e..6d72a226d 100644
--- a/google/cloud/bigtable_admin/gapic_version.py
+++ b/google/cloud/bigtable_admin/gapic_version.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.31.0" # {x-release-please-version}
+__version__ = "2.35.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py
index 4ee0cc6b1..6a47979fd 100644
--- a/google/cloud/bigtable_admin_v2/__init__.py
+++ b/google/cloud/bigtable_admin_v2/__init__.py
@@ -15,13 +15,23 @@
#
from google.cloud.bigtable_admin_v2 import gapic_version as package_version
+import google.api_core as api_core
+import sys
+
__version__ = package_version.__version__
+if sys.version_info >= (3, 8): # pragma: NO COVER
+ from importlib import metadata
+else: # pragma: NO COVER
+ # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove
+ # this code path once we drop support for Python 3.7
+ import importlib_metadata as metadata
+
from .services.bigtable_instance_admin import BigtableInstanceAdminClient
from .services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient
-from .services.bigtable_table_admin import BigtableTableAdminClient
-from .services.bigtable_table_admin import BigtableTableAdminAsyncClient
+from .services.bigtable_table_admin import BaseBigtableTableAdminClient
+from .services.bigtable_table_admin import BaseBigtableTableAdminAsyncClient
from .types.bigtable_instance_admin import CreateAppProfileRequest
from .types.bigtable_instance_admin import CreateClusterMetadata
@@ -73,12 +83,15 @@
from .types.bigtable_table_admin import CreateAuthorizedViewRequest
from .types.bigtable_table_admin import CreateBackupMetadata
from .types.bigtable_table_admin import CreateBackupRequest
+from .types.bigtable_table_admin import CreateSchemaBundleMetadata
+from .types.bigtable_table_admin import CreateSchemaBundleRequest
from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata
from .types.bigtable_table_admin import CreateTableFromSnapshotRequest
from .types.bigtable_table_admin import CreateTableRequest
from .types.bigtable_table_admin import DataBoostReadLocalWrites
from .types.bigtable_table_admin import DeleteAuthorizedViewRequest
from .types.bigtable_table_admin import DeleteBackupRequest
+from .types.bigtable_table_admin import DeleteSchemaBundleRequest
from .types.bigtable_table_admin import DeleteSnapshotRequest
from .types.bigtable_table_admin import DeleteTableRequest
from .types.bigtable_table_admin import DropRowRangeRequest
@@ -86,12 +99,15 @@
from .types.bigtable_table_admin import GenerateConsistencyTokenResponse
from .types.bigtable_table_admin import GetAuthorizedViewRequest
from .types.bigtable_table_admin import GetBackupRequest
+from .types.bigtable_table_admin import GetSchemaBundleRequest
from .types.bigtable_table_admin import GetSnapshotRequest
from .types.bigtable_table_admin import GetTableRequest
from .types.bigtable_table_admin import ListAuthorizedViewsRequest
from .types.bigtable_table_admin import ListAuthorizedViewsResponse
from .types.bigtable_table_admin import ListBackupsRequest
from .types.bigtable_table_admin import ListBackupsResponse
+from .types.bigtable_table_admin import ListSchemaBundlesRequest
+from .types.bigtable_table_admin import ListSchemaBundlesResponse
from .types.bigtable_table_admin import ListSnapshotsRequest
from .types.bigtable_table_admin import ListSnapshotsResponse
from .types.bigtable_table_admin import ListTablesRequest
@@ -108,6 +124,8 @@
from .types.bigtable_table_admin import UpdateAuthorizedViewMetadata
from .types.bigtable_table_admin import UpdateAuthorizedViewRequest
from .types.bigtable_table_admin import UpdateBackupRequest
+from .types.bigtable_table_admin import UpdateSchemaBundleMetadata
+from .types.bigtable_table_admin import UpdateSchemaBundleRequest
from .types.bigtable_table_admin import UpdateTableMetadata
from .types.bigtable_table_admin import UpdateTableRequest
from .types.common import OperationProgress
@@ -127,23 +145,121 @@
from .types.table import ColumnFamily
from .types.table import EncryptionInfo
from .types.table import GcRule
+from .types.table import ProtoSchema
from .types.table import RestoreInfo
+from .types.table import SchemaBundle
from .types.table import Snapshot
from .types.table import Table
+from .types.table import TieredStorageConfig
+from .types.table import TieredStorageRule
from .types.table import RestoreSourceType
from .types.types import Type
+if hasattr(api_core, "check_python_version") and hasattr(
+ api_core, "check_dependency_versions"
+): # pragma: NO COVER
+ api_core.check_python_version("google.cloud.bigtable_admin_v2") # type: ignore
+ api_core.check_dependency_versions("google.cloud.bigtable_admin_v2") # type: ignore
+else: # pragma: NO COVER
+ # An older version of api_core is installed which does not define the
+ # functions above. We do equivalent checks manually.
+ try:
+ import warnings
+ import sys
+
+ _py_version_str = sys.version.split()[0]
+ _package_label = "google.cloud.bigtable_admin_v2"
+ if sys.version_info < (3, 9):
+ warnings.warn(
+ "You are using a non-supported Python version "
+ + f"({_py_version_str}). Google will not post any further "
+ + f"updates to {_package_label} supporting this Python version. "
+ + "Please upgrade to the latest Python version, or at "
+ + f"least to Python 3.9, and then update {_package_label}.",
+ FutureWarning,
+ )
+ if sys.version_info[:2] == (3, 9):
+ warnings.warn(
+ f"You are using a Python version ({_py_version_str}) "
+ + f"which Google will stop supporting in {_package_label} in "
+ + "January 2026. Please "
+ + "upgrade to the latest Python version, or at "
+ + "least to Python 3.10, before then, and "
+ + f"then update {_package_label}.",
+ FutureWarning,
+ )
+
+ def parse_version_to_tuple(version_string: str):
+ """Safely converts a semantic version string to a comparable tuple of integers.
+ Example: "4.25.8" -> (4, 25, 8)
+ Ignores non-numeric parts and handles common version formats.
+ Args:
+                version_string: Version string in the format "x.y.z"; non-numeric suffixes are ignored.
+ Returns:
+ Tuple of integers for the parsed version string.
+ """
+ parts = []
+ for part in version_string.split("."):
+ try:
+ parts.append(int(part))
+ except ValueError:
+ # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here.
+ # This is a simplification compared to 'packaging.parse_version', but sufficient
+ # for comparing strictly numeric semantic versions.
+ break
+ return tuple(parts)
+
+ def _get_version(dependency_name):
+ try:
+ version_string: str = metadata.version(dependency_name)
+ parsed_version = parse_version_to_tuple(version_string)
+ return (parsed_version, version_string)
+ except Exception:
+ # Catch exceptions from metadata.version() (e.g., PackageNotFoundError)
+ # or errors during parse_version_to_tuple
+ return (None, "--")
+
+ _dependency_package = "google.protobuf"
+ _next_supported_version = "4.25.8"
+ _next_supported_version_tuple = (4, 25, 8)
+ _recommendation = " (we recommend 6.x)"
+ (_version_used, _version_used_string) = _get_version(_dependency_package)
+ if _version_used and _version_used < _next_supported_version_tuple:
+ warnings.warn(
+ f"Package {_package_label} depends on "
+ + f"{_dependency_package}, currently installed at version "
+ + f"{_version_used_string}. Future updates to "
+ + f"{_package_label} will require {_dependency_package} at "
+ + f"version {_next_supported_version} or higher{_recommendation}."
+ + " Please ensure "
+ + "that either (a) your Python environment doesn't pin the "
+ + f"version of {_dependency_package}, so that updates to "
+ + f"{_package_label} can require the higher version, or "
+ + "(b) you manually update your Python environment to use at "
+ + f"least version {_next_supported_version} of "
+ + f"{_dependency_package}.",
+ FutureWarning,
+ )
+ except Exception:
+ warnings.warn(
+ "Could not determine the version of Python "
+ + "currently being used. To continue receiving "
+                + f"updates for {_package_label}, ensure you are "
+ + "using a supported version of Python; see "
+ + "https://devguide.python.org/versions/"
+ )
+
__all__ = (
+ "BaseBigtableTableAdminAsyncClient",
"BigtableInstanceAdminAsyncClient",
- "BigtableTableAdminAsyncClient",
"AppProfile",
"AuthorizedView",
"AutoscalingLimits",
"AutoscalingTargets",
"Backup",
"BackupInfo",
+ "BaseBigtableTableAdminClient",
"BigtableInstanceAdminClient",
- "BigtableTableAdminClient",
"ChangeStreamConfig",
"CheckConsistencyRequest",
"CheckConsistencyResponse",
@@ -164,6 +280,8 @@
"CreateLogicalViewRequest",
"CreateMaterializedViewMetadata",
"CreateMaterializedViewRequest",
+ "CreateSchemaBundleMetadata",
+ "CreateSchemaBundleRequest",
"CreateTableFromSnapshotMetadata",
"CreateTableFromSnapshotRequest",
"CreateTableRequest",
@@ -175,6 +293,7 @@
"DeleteInstanceRequest",
"DeleteLogicalViewRequest",
"DeleteMaterializedViewRequest",
+ "DeleteSchemaBundleRequest",
"DeleteSnapshotRequest",
"DeleteTableRequest",
"DropRowRangeRequest",
@@ -189,6 +308,7 @@
"GetInstanceRequest",
"GetLogicalViewRequest",
"GetMaterializedViewRequest",
+ "GetSchemaBundleRequest",
"GetSnapshotRequest",
"GetTableRequest",
"HotTablet",
@@ -209,6 +329,8 @@
"ListLogicalViewsResponse",
"ListMaterializedViewsRequest",
"ListMaterializedViewsResponse",
+ "ListSchemaBundlesRequest",
+ "ListSchemaBundlesResponse",
"ListSnapshotsRequest",
"ListSnapshotsResponse",
"ListTablesRequest",
@@ -221,16 +343,20 @@
"PartialUpdateClusterMetadata",
"PartialUpdateClusterRequest",
"PartialUpdateInstanceRequest",
+ "ProtoSchema",
"RestoreInfo",
"RestoreSourceType",
"RestoreTableMetadata",
"RestoreTableRequest",
+ "SchemaBundle",
"Snapshot",
"SnapshotTableMetadata",
"SnapshotTableRequest",
"StandardReadRemoteWrites",
"StorageType",
"Table",
+ "TieredStorageConfig",
+ "TieredStorageRule",
"Type",
"UndeleteTableMetadata",
"UndeleteTableRequest",
@@ -245,6 +371,12 @@
"UpdateLogicalViewRequest",
"UpdateMaterializedViewMetadata",
"UpdateMaterializedViewRequest",
+ "UpdateSchemaBundleMetadata",
+ "UpdateSchemaBundleRequest",
"UpdateTableMetadata",
"UpdateTableRequest",
)
+
+from .overlay import * # noqa: F403
+
+__all__ += overlay.__all__ # noqa: F405
diff --git a/google/cloud/bigtable_admin_v2/gapic_metadata.json b/google/cloud/bigtable_admin_v2/gapic_metadata.json
index c56fde6e7..9725d3384 100644
--- a/google/cloud/bigtable_admin_v2/gapic_metadata.json
+++ b/google/cloud/bigtable_admin_v2/gapic_metadata.json
@@ -492,7 +492,7 @@
"BigtableTableAdmin": {
"clients": {
"grpc": {
- "libraryClient": "BigtableTableAdminClient",
+ "libraryClient": "BaseBigtableTableAdminClient",
"rpcs": {
"CheckConsistency": {
"methods": [
@@ -514,6 +514,11 @@
"create_backup"
]
},
+ "CreateSchemaBundle": {
+ "methods": [
+ "create_schema_bundle"
+ ]
+ },
"CreateTable": {
"methods": [
"create_table"
@@ -534,6 +539,11 @@
"delete_backup"
]
},
+ "DeleteSchemaBundle": {
+ "methods": [
+ "delete_schema_bundle"
+ ]
+ },
"DeleteSnapshot": {
"methods": [
"delete_snapshot"
@@ -569,6 +579,11 @@
"get_iam_policy"
]
},
+ "GetSchemaBundle": {
+ "methods": [
+ "get_schema_bundle"
+ ]
+ },
"GetSnapshot": {
"methods": [
"get_snapshot"
@@ -589,6 +604,11 @@
"list_backups"
]
},
+ "ListSchemaBundles": {
+ "methods": [
+ "list_schema_bundles"
+ ]
+ },
"ListSnapshots": {
"methods": [
"list_snapshots"
@@ -606,7 +626,7 @@
},
"RestoreTable": {
"methods": [
- "restore_table"
+ "_restore_table"
]
},
"SetIamPolicy": {
@@ -639,6 +659,11 @@
"update_backup"
]
},
+ "UpdateSchemaBundle": {
+ "methods": [
+ "update_schema_bundle"
+ ]
+ },
"UpdateTable": {
"methods": [
"update_table"
@@ -647,7 +672,7 @@
}
},
"grpc-async": {
- "libraryClient": "BigtableTableAdminAsyncClient",
+ "libraryClient": "BaseBigtableTableAdminAsyncClient",
"rpcs": {
"CheckConsistency": {
"methods": [
@@ -669,6 +694,11 @@
"create_backup"
]
},
+ "CreateSchemaBundle": {
+ "methods": [
+ "create_schema_bundle"
+ ]
+ },
"CreateTable": {
"methods": [
"create_table"
@@ -689,6 +719,11 @@
"delete_backup"
]
},
+ "DeleteSchemaBundle": {
+ "methods": [
+ "delete_schema_bundle"
+ ]
+ },
"DeleteSnapshot": {
"methods": [
"delete_snapshot"
@@ -724,6 +759,11 @@
"get_iam_policy"
]
},
+ "GetSchemaBundle": {
+ "methods": [
+ "get_schema_bundle"
+ ]
+ },
"GetSnapshot": {
"methods": [
"get_snapshot"
@@ -744,6 +784,11 @@
"list_backups"
]
},
+ "ListSchemaBundles": {
+ "methods": [
+ "list_schema_bundles"
+ ]
+ },
"ListSnapshots": {
"methods": [
"list_snapshots"
@@ -761,7 +806,7 @@
},
"RestoreTable": {
"methods": [
- "restore_table"
+ "_restore_table"
]
},
"SetIamPolicy": {
@@ -794,6 +839,11 @@
"update_backup"
]
},
+ "UpdateSchemaBundle": {
+ "methods": [
+ "update_schema_bundle"
+ ]
+ },
"UpdateTable": {
"methods": [
"update_table"
@@ -802,7 +852,7 @@
}
},
"rest": {
- "libraryClient": "BigtableTableAdminClient",
+ "libraryClient": "BaseBigtableTableAdminClient",
"rpcs": {
"CheckConsistency": {
"methods": [
@@ -824,6 +874,11 @@
"create_backup"
]
},
+ "CreateSchemaBundle": {
+ "methods": [
+ "create_schema_bundle"
+ ]
+ },
"CreateTable": {
"methods": [
"create_table"
@@ -844,6 +899,11 @@
"delete_backup"
]
},
+ "DeleteSchemaBundle": {
+ "methods": [
+ "delete_schema_bundle"
+ ]
+ },
"DeleteSnapshot": {
"methods": [
"delete_snapshot"
@@ -879,6 +939,11 @@
"get_iam_policy"
]
},
+ "GetSchemaBundle": {
+ "methods": [
+ "get_schema_bundle"
+ ]
+ },
"GetSnapshot": {
"methods": [
"get_snapshot"
@@ -899,6 +964,11 @@
"list_backups"
]
},
+ "ListSchemaBundles": {
+ "methods": [
+ "list_schema_bundles"
+ ]
+ },
"ListSnapshots": {
"methods": [
"list_snapshots"
@@ -916,7 +986,7 @@
},
"RestoreTable": {
"methods": [
- "restore_table"
+ "_restore_table"
]
},
"SetIamPolicy": {
@@ -949,6 +1019,11 @@
"update_backup"
]
},
+ "UpdateSchemaBundle": {
+ "methods": [
+ "update_schema_bundle"
+ ]
+ },
"UpdateTable": {
"methods": [
"update_table"
diff --git a/google/cloud/bigtable_admin_v2/gapic_version.py b/google/cloud/bigtable_admin_v2/gapic_version.py
index 8ab09c42e..6d72a226d 100644
--- a/google/cloud/bigtable_admin_v2/gapic_version.py
+++ b/google/cloud/bigtable_admin_v2/gapic_version.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.31.0" # {x-release-please-version}
+__version__ = "2.35.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable_admin_v2/overlay/__init__.py b/google/cloud/bigtable_admin_v2/overlay/__init__.py
new file mode 100644
index 000000000..f66c7f8dd
--- /dev/null
+++ b/google/cloud/bigtable_admin_v2/overlay/__init__.py
@@ -0,0 +1,49 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This directory and all its subdirectories are the only handwritten
+# components of the otherwise autogenerated google/cloud/bigtable/admin_v2.
+# The purpose of the overlay directory is to add additional functionality to
+# the autogenerated library while preserving its developer experience. These
+# handwritten additions currently consist of the following:
+#
+# 1. TODO: Document final GcRule design choice here
+# 2. An LRO class for restore_table that exposes an Operation for
+# OptimizeRestoreTable, if that LRO exists.
+# 3. New methods (wait_for_consistency and wait_for_replication) that return
+# a polling future class for automatically polling check_consistency.
+#
+# This directory is structured to mirror that of a typical autogenerated library (e.g.
+# services/types subdirectories), and the aforementioned handwritten additions are
+# currently implemented as either types under overlay/types or methods on a
+# subclassed client under overlay/services.
+
+from .types import (
+ AsyncRestoreTableOperation,
+ RestoreTableOperation,
+ WaitForConsistencyRequest,
+)
+
+from .services.bigtable_table_admin import (
+ BigtableTableAdminAsyncClient,
+ BigtableTableAdminClient,
+)
+
+__all__ = (
+ "AsyncRestoreTableOperation",
+ "RestoreTableOperation",
+ "BigtableTableAdminAsyncClient",
+ "BigtableTableAdminClient",
+ "WaitForConsistencyRequest",
+)
diff --git a/.github/.OwlBot.lock.yaml b/google/cloud/bigtable_admin_v2/overlay/services/__init__.py
similarity index 72%
rename from .github/.OwlBot.lock.yaml
rename to google/cloud/bigtable_admin_v2/overlay/services/__init__.py
index c631e1f7d..ab7686e26 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/google/cloud/bigtable_admin_v2/overlay/services/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2025 Google LLC
+# Copyright 2025 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,7 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-docker:
- image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
- digest: sha256:5581906b957284864632cde4e9c51d1cc66b0094990b27e689132fe5cd036046
-# created: 2025-03-05
diff --git a/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py
new file mode 100644
index 000000000..f80e3234f
--- /dev/null
+++ b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py
@@ -0,0 +1,23 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: Add the async client after owlbot changes.
+
+from .async_client import BigtableTableAdminAsyncClient
+from .client import BigtableTableAdminClient
+
+__all__ = (
+ "BigtableTableAdminAsyncClient",
+ "BigtableTableAdminClient",
+)
diff --git a/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py
new file mode 100644
index 000000000..ee8e5757d
--- /dev/null
+++ b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py
@@ -0,0 +1,375 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import functools
+
+from typing import Callable, Optional, Sequence, Tuple, Union
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+
+try:
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
+
+from google.api_core import client_options as client_options_lib
+from google.auth import credentials as ga_credentials # type: ignore
+
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ async_client as base_client,
+)
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import (
+ BigtableTableAdminTransport,
+)
+from google.cloud.bigtable_admin_v2.overlay.types import (
+ async_consistency,
+ async_restore_table,
+ wait_for_consistency_request,
+)
+
+from google.cloud.bigtable.gapic_version import __version__ as bigtable_version
+
+
+DEFAULT_CLIENT_INFO = copy.copy(base_client.DEFAULT_CLIENT_INFO)
+DEFAULT_CLIENT_INFO.client_library_version = f"{bigtable_version}-admin-overlay-async"
+
+
+class BigtableTableAdminAsyncClient(base_client.BaseBigtableTableAdminAsyncClient):
+ def __init__(
+ self,
+ *,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[
+ Union[
+ str,
+ BigtableTableAdminTransport,
+ Callable[..., BigtableTableAdminTransport],
+ ]
+ ] = "grpc_asyncio",
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the Bigtable table admin async client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport to use.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the BigtableTableAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+                variable, which can have one of the following values:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide a client certificate for mTLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ super(BigtableTableAdminAsyncClient, self).__init__(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ async def restore_table(
+ self,
+ request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> async_restore_table.AsyncRestoreTableOperation:
+        r"""Create a new table by restoring from a completed backup. The
+        returned long-running operation can be used to track the progress of
+        the operation, and to cancel it. Its ``metadata`` field type is
+        ``RestoreTableMetadata``. Its ``response`` type is
+        :class:`google.cloud.bigtable_admin_v2.types.Table`, if successful.
+
+        Additionally, the returned long-running operation provides a method,
+        :meth:`google.cloud.bigtable_admin_v2.overlay.types.async_restore_table.AsyncRestoreTableOperation.optimize_restore_table_operation`,
+        which gives access to a :class:`google.api_core.operation_async.AsyncOperation` object representing the
+        OptimizeRestoreTable long-running operation after the current one has completed.
+
+ .. code-block:: python
+
+ # This snippet should be regarded as a code template only.
+ #
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud.bigtable import admin_v2
+
+ async def sample_restore_table():
+ # Create a client
+ client = admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = admin_v2.RestoreTableRequest(
+ backup="backup_value",
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ operation = await client.restore_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+ # Handle LRO2
+ optimize_operation = await operation.optimize_restore_table_operation()
+
+ if optimize_operation:
+ print("Waiting for table optimization to complete...")
+
+ response = await optimize_operation.result()
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]):
+ The request object. The request for
+ [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.overlay.types.async_restore_table.AsyncRestoreTableOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp.
+ Each table is served using the resources of its
+ parent cluster.
+ """
+ operation = await self._restore_table(
+ request=request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ restore_table_operation = async_restore_table.AsyncRestoreTableOperation(
+ self._client._transport.operations_client, operation
+ )
+ return restore_table_operation
+
+ async def wait_for_consistency(
+ self,
+ request: Optional[
+ Union[wait_for_consistency_request.WaitForConsistencyRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> bool:
+        r"""Blocks until the mutations made to the specified Table before this
+        call have been replicated or, when the `data_boost_read_local_writes`
+        mode is requested, until reads using an app profile with
+        `DataBoostIsolationReadOnly` can see all writes committed before the
+        token was created. This is done by generating a consistency token for
+        the Table and then polling :meth:`check_consistency` until it returns True.
+
+ .. code-block:: python
+
+ # This snippet should be regarded as a code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud.bigtable import admin_v2
+
+ async def sample_wait_for_consistency():
+ # Create a client
+ client = admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = admin_v2.WaitForConsistencyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ print("Waiting for operation to complete...")
+
+            response = await client.wait_for_consistency(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.overlay.types.WaitForConsistencyRequest, dict]):
+ The request object.
+ name (str):
+ Required. The unique name of the Table for which to
+ create a consistency token. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ bool:
+ If the `standard_read_remote_writes` mode is specified in the request object, returns
+ `True` after the mutations of the specified table have been fully replicated. If the
+ `data_boost_read_local_writes` mode is specified in the request object, returns `True`
+ after reads using an app profile with `DataBoostIsolationReadOnly` can see all writes
+ committed before the token was created.
+
+ Raises:
+ google.api_core.GoogleAPICallError: If the operation errors or if
+ the timeout is reached before the operation completes.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, wait_for_consistency_request.WaitForConsistencyRequest
+ ):
+ request = wait_for_consistency_request.WaitForConsistencyRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Generate the consistency token.
+ generate_consistency_token_request = (
+ bigtable_table_admin.GenerateConsistencyTokenRequest(
+ name=request.name,
+ )
+ )
+
+ generate_consistency_response = await self.generate_consistency_token(
+ generate_consistency_token_request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Create the CheckConsistencyRequest object.
+ check_consistency_request = bigtable_table_admin.CheckConsistencyRequest(
+ name=request.name,
+ consistency_token=generate_consistency_response.consistency_token,
+ )
+
+ # Since the default values of StandardReadRemoteWrites and DataBoostReadLocalWrites evaluate to
+ # False in proto plus, we cannot do a simple "if request.standard_read_remote_writes" to check
+ # whether or not that field is defined in the original request object.
+ mode_oneof_field = request._pb.WhichOneof("mode")
+ if mode_oneof_field:
+ setattr(
+ check_consistency_request,
+ mode_oneof_field,
+ getattr(request, mode_oneof_field),
+ )
+
+ check_consistency_call = functools.partial(
+ self.check_consistency,
+ check_consistency_request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Block and wait until the polling harness returns True.
+ check_consistency_future = (
+ async_consistency._AsyncCheckConsistencyPollingFuture(
+ check_consistency_call
+ )
+ )
+ return await check_consistency_future.result()
diff --git a/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py
new file mode 100644
index 000000000..1b6770b10
--- /dev/null
+++ b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py
@@ -0,0 +1,373 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import functools
+
+from typing import Callable, Optional, Sequence, Tuple, Union
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+from google.api_core import client_options as client_options_lib
+from google.auth import credentials as ga_credentials # type: ignore
+
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ client as base_client,
+)
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import (
+ BigtableTableAdminTransport,
+)
+from google.cloud.bigtable_admin_v2.overlay.types import (
+ consistency,
+ restore_table,
+ wait_for_consistency_request,
+)
+
+from google.cloud.bigtable.gapic_version import __version__ as bigtable_version
+
+
+DEFAULT_CLIENT_INFO = copy.copy(base_client.DEFAULT_CLIENT_INFO)
+DEFAULT_CLIENT_INFO.client_library_version = f"{bigtable_version}-admin-overlay"
+
+
+class BigtableTableAdminClient(base_client.BaseBigtableTableAdminClient):
+ def __init__(
+ self,
+ *,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Optional[
+ Union[
+ str,
+ BigtableTableAdminTransport,
+ Callable[..., BigtableTableAdminTransport],
+ ]
+ ] = None,
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the Bigtable table admin client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]):
+ The transport to use, or a Callable that constructs and returns a new transport.
+ If a Callable is given, it will be called with the same set of initialization
+ arguments as used in the BigtableTableAdminTransport constructor.
+ If set to None, a transport is chosen automatically.
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+ Custom options for the client.
+
+ 1. The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client when ``transport`` is
+ not explicitly provided. Only if this property is not set and
+ ``transport`` was not explicitly provided, the endpoint is
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+ variable, which can have one of the following values:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto-switch to the
+ default mTLS endpoint if client certificate is present; this is
+ the default value).
+
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide a client certificate for mTLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ 3. The ``universe_domain`` property can be used to override the
+ default "googleapis.com" universe. Note that the ``api_endpoint``
+ property still takes precedence; and ``universe_domain`` is
+ currently not supported for mTLS.
+
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
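+
+ A minimal construction sketch (the endpoint below is shown only to illustrate
+ ``client_options``; credentials are resolved from the environment):
+
+ .. code-block:: python
+
+ from google.api_core import client_options as client_options_lib
+
+ options = client_options_lib.ClientOptions(api_endpoint="bigtableadmin.googleapis.com")
+ client = BigtableTableAdminClient(client_options=options)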
+ """
+ super(BigtableTableAdminClient, self).__init__(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ def restore_table(
+ self,
+ request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> restore_table.RestoreTableOperation:
+ r"""Create a new table by restoring from a completed backup. The
+ returned table :class:`long-running operation
+ `
+ can be used to track the progress of the operation, and to cancel it. The
+ :attr:`metadata ` field type is
+ :class:`RestoreTableMetadata `.
+ The :meth:`response ` type is
+ :class:`google.cloud.bigtable_admin_v2.types.Table`, if successful.
+
+ Additionally, the returned :class:`long-running-operation `
+ provides a method, :meth:`google.cloud.bigtable_admin_v2.overlay.types.restore_table.RestoreTableOperation.optimize_restore_table_operation` that
+ provides access to a :class:`google.api_core.operation.Operation` object representing the OptimizeRestoreTable long-running-operation
+ after the current one has completed.
+
+ .. code-block:: python
+
+ # This snippet should be regarded as a code template only.
+ #
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud.bigtable import admin_v2
+
+ def sample_restore_table():
+ # Create a client
+ client = admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = admin_v2.RestoreTableRequest(
+ backup="backup_value",
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ operation = client.restore_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+ # Handle the follow-up OptimizeRestoredTable operation, if any
+ optimize_operation = operation.optimize_restored_table_operation()
+
+ if optimize_operation:
+ print("Waiting for table optimization to complete...")
+
+ response = optimize_operation.result()
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]):
+ The request object. The request for
+ [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.overlay.types.restore_table.RestoreTableOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp.
+ Each table is served using the resources of its
+ parent cluster.
+ """
+ operation = self._restore_table(
+ request=request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ restore_table_operation = restore_table.RestoreTableOperation(
+ self._transport.operations_client, operation
+ )
+ return restore_table_operation
+
+ def wait_for_consistency(
+ self,
+ request: Optional[
+ Union[wait_for_consistency_request.WaitForConsistencyRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> bool:
+ r"""Blocks until the mutations for the specified Table that have been
+ made before the call have been replicated or reads using an app profile with `DataBoostIsolationReadOnly`
+ can see all writes committed before the token was created. This is done by generating
+ a consistency token for the Table, then polling :meth:`check_consistency`
+ for the specified table until the call returns True.
+
+ .. code-block:: python
+
+ # This snippet should be regarded as a code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud.bigtable import admin_v2
+
+ def sample_wait_for_consistency():
+ # Create a client
+ client = admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = admin_v2.WaitForConsistencyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ print("Waiting for operation to complete...")
+
+ response = client.wait_for_consistency(request=request)
+
+ # Handle the response
+ print(response)
+
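+ Roughly equivalent manual flow, using ``client`` and ``request`` from the sample above
+ (a sketch only; the actual implementation also forwards ``retry``, ``timeout`` and
+ ``metadata``, and polls with backoff via a polling future rather than a bare loop):
+
+ .. code-block:: python
+
+ from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+ token = client.generate_consistency_token(
+ bigtable_table_admin.GenerateConsistencyTokenRequest(name=request.name)
+ ).consistency_token
+
+ consistent = False
+ while not consistent:
+ consistent = client.check_consistency(
+ bigtable_table_admin.CheckConsistencyRequest(
+ name=request.name, consistency_token=token
+ )
+ ).consistent
+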
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.overlay.types.WaitForConsistencyRequest, dict]):
+ The request object.
+ name (str):
+ Required. The unique name of the Table for which to
+ create a consistency token. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ bool:
+ If the `standard_read_remote_writes` mode is specified in the request object, returns
+ `True` after the mutations of the specified table have been fully replicated. If the
+ `data_boost_read_local_writes` mode is specified in the request object, returns `True`
+ after reads using an app profile with `DataBoostIsolationReadOnly` can see all writes
+ committed before the token was created.
+
+ Raises:
+ google.api_core.GoogleAPICallError: If the operation errors or if
+ the timeout is reached before the operation completes.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(
+ request, wait_for_consistency_request.WaitForConsistencyRequest
+ ):
+ request = wait_for_consistency_request.WaitForConsistencyRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Generate the consistency token.
+ generate_consistency_token_request = (
+ bigtable_table_admin.GenerateConsistencyTokenRequest(
+ name=request.name,
+ )
+ )
+
+ generate_consistency_response = self.generate_consistency_token(
+ generate_consistency_token_request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Create the CheckConsistencyRequest object.
+ check_consistency_request = bigtable_table_admin.CheckConsistencyRequest(
+ name=request.name,
+ consistency_token=generate_consistency_response.consistency_token,
+ )
+
+ # Since the default values of StandardReadRemoteWrites and DataBoostReadLocalWrites evaluate to
+ # False in proto plus, we cannot do a simple "if request.standard_read_remote_writes" to check
+ # whether or not that field is defined in the original request object.
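+ # For example, if the caller set `standard_read_remote_writes` on the request,
+ # WhichOneof("mode") returns "standard_read_remote_writes" and that message is copied onto
+ # the CheckConsistencyRequest below.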
+ mode_oneof_field = request._pb.WhichOneof("mode")
+ if mode_oneof_field:
+ setattr(
+ check_consistency_request,
+ mode_oneof_field,
+ getattr(request, mode_oneof_field),
+ )
+
+ check_consistency_call = functools.partial(
+ self.check_consistency,
+ check_consistency_request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Block and wait until the polling harness returns True.
+ check_consistency_future = consistency._CheckConsistencyPollingFuture(
+ check_consistency_call
+ )
+ return check_consistency_future.result()
diff --git a/google/cloud/bigtable_admin_v2/overlay/types/__init__.py b/google/cloud/bigtable_admin_v2/overlay/types/__init__.py
new file mode 100644
index 000000000..16b032ac4
--- /dev/null
+++ b/google/cloud/bigtable_admin_v2/overlay/types/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .async_restore_table import (
+ AsyncRestoreTableOperation,
+)
+
+from .restore_table import (
+ RestoreTableOperation,
+)
+
+from .wait_for_consistency_request import (
+ WaitForConsistencyRequest,
+)
+
+__all__ = (
+ "AsyncRestoreTableOperation",
+ "RestoreTableOperation",
+ "WaitForConsistencyRequest",
+)
diff --git a/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py b/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py
new file mode 100644
index 000000000..0703940d5
--- /dev/null
+++ b/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py
@@ -0,0 +1,104 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Awaitable, Union, Callable
+
+from google.api_core.future import async_future
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+
+# The consistency check could take a very long time, so we wait indefinitely.
+DEFAULT_RETRY = async_future.DEFAULT_RETRY.with_timeout(None)
+
+
+class _AsyncCheckConsistencyPollingFuture(async_future.AsyncFuture):
+ """A Future that polls an underlying `check_consistency` operation until it returns True.
+
+ **This class should not be instantiated by users** and should only be instantiated by the admin
+ client's
+ :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.AsyncBigtableTableAdminClient.wait_for_consistency`
+ or
+ :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.AsyncBigtableTableAdminClient.wait_for_replication`
+ methods.
+
+ Args:
+ check_consistency_call (Callable[[Optional[google.api_core.retry.Retry]], Awaitable[google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse]]):
+ A :meth:`check_consistency` call from the admin client with every user parameter
+ already bound, typically via :func:`functools.partial`.
+ retry (google.api_core.retry.AsyncRetry): The retry configuration used
+ when polling. This can be used to control how often :meth:`done`
+ is polled. Regardless of the retry's ``deadline``, it will be
+ overridden by the ``timeout`` argument to :meth:`result`.
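+
+ A minimal usage sketch (``client`` and ``check_consistency_request`` are placeholders for
+ values built by the caller, as in the admin client's ``wait_for_consistency``):
+
+ .. code-block:: python
+
+ import functools
+
+ async def wait_until_consistent(client, check_consistency_request):
+ call = functools.partial(client.check_consistency, check_consistency_request)
+ future = _AsyncCheckConsistencyPollingFuture(call)
+ return await future.result()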
+ """
+
+ def __init__(
+ self,
+ check_consistency_call: Callable[
+ [OptionalRetry], Awaitable[bigtable_table_admin.CheckConsistencyResponse]
+ ],
+ retry: retries.AsyncRetry = DEFAULT_RETRY,
+ **kwargs
+ ):
+ super(_AsyncCheckConsistencyPollingFuture, self).__init__(retry=retry, **kwargs)
+
+ # done() may be called with or without an explicit retry, so check_consistency_call is a
+ # functools.partial with all of the other user parameters already bound.
+ self._check_consistency_call = check_consistency_call
+
+ async def done(self, retry: OptionalRetry = None):
+ """Polls the underlying `check_consistency` call to see if the future is complete.
+
+ Args:
+ retry (google.api_core.retry.Retry): (Optional) How to retry the
+ polling RPC (not to be confused with the polling configuration; see
+ the documentation for :meth:`result` for details).
+
+ Returns:
+ bool: True if the future is complete, False otherwise.
+ """
+ if self._future.done():
+ return True
+
+ try:
+ check_consistency_response = await self._check_consistency_call()
+ if check_consistency_response.consistent:
+ self.set_result(True)
+
+ return check_consistency_response.consistent
+ except Exception as e:
+ self.set_exception(e)
+
+ def cancel(self):
+ raise NotImplementedError("Cannot cancel consistency token operation")
+
+ def cancelled(self):
+ raise NotImplementedError("Cannot cancel consistency token operation")
diff --git a/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py b/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py
new file mode 100644
index 000000000..9edfb4963
--- /dev/null
+++ b/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py
@@ -0,0 +1,99 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional
+
+from google.api_core import exceptions
+from google.api_core import operation_async
+from google.protobuf import empty_pb2
+
+from google.cloud.bigtable_admin_v2.types import OptimizeRestoredTableMetadata
+
+
+class AsyncRestoreTableOperation(operation_async.AsyncOperation):
+ """A Future for interacting with Bigtable Admin's RestoreTable Long-Running Operation.
+
+ This is needed to expose a potential long-running operation that might run after this operation
+ finishes, OptimizeRestoredTable. This is exposed via the :meth:`optimize_restored_table_operation`
+ method.
+
+ **This class should not be instantiated by users** and should only be instantiated by the admin
+ client's :meth:`restore_table
+ `
+ method.
+
+ Args:
+ operations_client (google.api_core.operations_v1.AbstractOperationsClient): The operations
+ client from the admin client class's transport.
+ restore_table_operation (google.api_core.operation_async.AsyncOperation): A
+ :class:`google.api_core.operation_async.AsyncOperation`
+ instance resembling a RestoreTable long-running operation
+ """
+
+ def __init__(
+ self, operations_client, restore_table_operation: operation_async.AsyncOperation
+ ):
+ self._operations_client = operations_client
+ self._optimize_restored_table_operation = None
+ super().__init__(
+ restore_table_operation._operation,
+ restore_table_operation._refresh,
+ restore_table_operation._cancel,
+ restore_table_operation._result_type,
+ restore_table_operation._metadata_type,
+ retry=restore_table_operation._retry,
+ )
+
+ async def optimize_restored_table_operation(
+ self,
+ ) -> Optional[operation_async.AsyncOperation]:
+ """Gets the OptimizeRestoredTable long-running operation that runs after this operation finishes.
+ The current operation might not trigger a follow-up OptimizeRestoredTable operation, in which case, this
+ method will return `None`.
+ This method must not be called before the parent restore_table operation is complete.
+ Returns:
+ An object representing a long-running operation, or None if there is no OptimizeRestoredTable operation
+ after this one.
+ Raises:
+ RuntimeError: raised when accessed before the restore_table operation is complete
+
+ Raises:
+ google.api_core.GoogleAPIError: raised when accessed before the restore_table operation is complete
+ """
+ if not await self.done():
+ raise exceptions.GoogleAPIError(
+ "optimize_restored_table operation can't be accessed until the restore_table operation is complete"
+ )
+
+ if self._optimize_restored_table_operation is not None:
+ return self._optimize_restored_table_operation
+
+ operation_name = self.metadata.optimize_table_operation_name
+
+ # When the RestoreTable operation finishes, it might not necessarily trigger
+ # an optimize operation.
+ if operation_name:
+ gapic_operation = await self._operations_client.get_operation(
+ name=operation_name
+ )
+ self._optimize_restored_table_operation = operation_async.from_gapic(
+ gapic_operation,
+ self._operations_client,
+ empty_pb2.Empty,
+ metadata_type=OptimizeRestoredTableMetadata,
+ )
+ return self._optimize_restored_table_operation
+ else:
+ # no optimize operation found
+ return None
diff --git a/google/cloud/bigtable_admin_v2/overlay/types/consistency.py b/google/cloud/bigtable_admin_v2/overlay/types/consistency.py
new file mode 100644
index 000000000..63a110975
--- /dev/null
+++ b/google/cloud/bigtable_admin_v2/overlay/types/consistency.py
@@ -0,0 +1,101 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Union, Callable
+
+from google.api_core.future import polling
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+
+
+# The consistency check could take a very long time, so we wait indefinitely.
+DEFAULT_RETRY = polling.DEFAULT_POLLING.with_timeout(None)
+
+
+class _CheckConsistencyPollingFuture(polling.PollingFuture):
+ """A Future that polls an underlying `check_consistency` operation until it returns True.
+
+ **This class should not be instantiated by users** and should only be instantiated by the admin
+ client's
+ :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.BigtableTableAdminClient.wait_for_consistency`
+ or
+ :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.BigtableTableAdminClient.wait_for_replication`
+ methods.
+
+ Args:
+ check_consistency_call (Callable[[Optional[google.api_core.retry.Retry]], google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse]):
+ A :meth:`check_consistency` call from the admin client with every user parameter
+ already bound, typically via :func:`functools.partial`.
+ polling (google.api_core.retry.Retry): The configuration used for polling.
+ This parameter controls how often :meth:`done` is polled. If the
+ ``timeout`` argument is specified in the :meth:`result` method it will
+ override the ``polling.timeout`` property.
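+
+ A minimal usage sketch (``client`` and ``check_consistency_request`` are placeholders for
+ values built by the caller, as in the admin client's ``wait_for_consistency``):
+
+ .. code-block:: python
+
+ import functools
+
+ call = functools.partial(client.check_consistency, check_consistency_request)
+ future = _CheckConsistencyPollingFuture(call)
+ consistent = future.result()  # blocks, polling check_consistency until it returns True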
+ """
+
+ def __init__(
+ self,
+ check_consistency_call: Callable[
+ [OptionalRetry], bigtable_table_admin.CheckConsistencyResponse
+ ],
+ polling: retries.Retry = DEFAULT_RETRY,
+ **kwargs
+ ):
+ super(_CheckConsistencyPollingFuture, self).__init__(polling=polling, **kwargs)
+
+ # done() may be called with or without an explicit retry, so check_consistency_call is a
+ # functools.partial with all of the other user parameters already bound.
+ self._check_consistency_call = check_consistency_call
+
+ def done(self, retry: OptionalRetry = None):
+ """Polls the underlying `check_consistency` call to see if the future is complete.
+
+ Args:
+ retry (google.api_core.retry.Retry): (Optional) How to retry the
+ polling RPC (not to be confused with the polling configuration; see
+ the documentation for :meth:`result` for details).
+
+ Returns:
+ bool: True if the future is complete, False otherwise.
+ """
+
+ if self._result_set:
+ return True
+
+ try:
+ check_consistency_response = self._check_consistency_call()
+ if check_consistency_response.consistent:
+ self.set_result(True)
+
+ return check_consistency_response.consistent
+ except Exception as e:
+ self.set_exception(e)
+
+ def cancel(self):
+ raise NotImplementedError("Cannot cancel consistency token operation")
+
+ def cancelled(self):
+ raise NotImplementedError("Cannot cancel consistency token operation")
diff --git a/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py b/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py
new file mode 100644
index 000000000..84c9c5d91
--- /dev/null
+++ b/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py
@@ -0,0 +1,102 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional
+
+from google.api_core import exceptions
+from google.api_core import operation
+from google.protobuf import empty_pb2
+
+from google.cloud.bigtable_admin_v2.types import OptimizeRestoredTableMetadata
+
+
+class RestoreTableOperation(operation.Operation):
+ """A Future for interacting with Bigtable Admin's RestoreTable Long-Running Operation.
+
+ This is needed to expose a potential long-running operation that might run after this operation
+ finishes, OptimizeRestoredTable. This is exposed via the :meth:`optimize_restored_table_operation`
+ method.
+
+ **This class should not be instantiated by users** and should only be instantiated by the admin
+ client's :meth:`restore_table
+ `
+ method.
+
+ Args:
+ operations_client (google.api_core.operations_v1.AbstractOperationsClient): The operations
+ client from the admin client class's transport.
+ restore_table_operation (google.api_core.operation.Operation): A :class:`google.api_core.operation.Operation`
+ instance resembling a RestoreTable long-running operation
+ """
+
+ def __init__(self, operations_client, restore_table_operation: operation.Operation):
+ self._operations_client = operations_client
+ self._optimize_restored_table_operation = None
+ super().__init__(
+ restore_table_operation._operation,
+ restore_table_operation._refresh,
+ restore_table_operation._cancel,
+ restore_table_operation._result_type,
+ restore_table_operation._metadata_type,
+ polling=restore_table_operation._polling,
+ )
+
+ def optimize_restored_table_operation(self) -> Optional[operation.Operation]:
+ """Gets the OptimizeRestoredTable long-running operation that runs after this operation finishes.
+
+ This must not be called before the parent restore_table operation is complete. You can
+ guarantee this by calling this method only after this class's
+ :meth:`google.api_core.operation.Operation.result` method has returned.
+
+ The follow-up operation has a ``metadata`` field of type
+ :class:`google.cloud.bigtable_admin_v2.types.OptimizeRestoredTableMetadata`
+ and no return value, but can be waited for with :meth:`result`.
+
+ The current operation might not trigger a follow-up OptimizeRestoredTable operation, in which case, this
+ method will return `None`.
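+
+ A minimal usage sketch (``restore_op`` is a placeholder for the value returned by the
+ client's ``restore_table`` method):
+
+ .. code-block:: python
+
+ restore_op.result()  # wait for the RestoreTable operation to finish first
+ optimize_op = restore_op.optimize_restored_table_operation()
+ if optimize_op is not None:
+ optimize_op.result()  # blocks until table optimization completes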
+
+ Returns:
+ Optional[google.api_core.operation.Operation]:
+ An object representing a long-running operation, or None if there is no OptimizeRestoredTable operation
+ after this one.
+
+ Raises:
+ google.api_core.GoogleAPIError: raised when accessed before the restore_table operation is complete
+ """
+ if not self.done():
+ raise exceptions.GoogleAPIError(
+ "optimize_restored_table operation can't be accessed until the restore_table operation is complete"
+ )
+
+ if self._optimize_restored_table_operation is not None:
+ return self._optimize_restored_table_operation
+
+ operation_name = self.metadata.optimize_table_operation_name
+
+ # When the RestoreTable operation finishes, it might not necessarily trigger
+ # an optimize operation.
+ if operation_name:
+ gapic_operation = self._operations_client.get_operation(name=operation_name)
+ self._optimize_restored_table_operation = operation.from_gapic(
+ gapic_operation,
+ self._operations_client,
+ empty_pb2.Empty,
+ metadata_type=OptimizeRestoredTableMetadata,
+ )
+ return self._optimize_restored_table_operation
+ else:
+ # no optimize operation found
+ return None
diff --git a/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py b/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py
new file mode 100644
index 000000000..51070230a
--- /dev/null
+++ b/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto
+
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+__protobuf__ = proto.module(
+ package="google.bigtable.admin.v2",
+ manifest={
+ "WaitForConsistencyRequest",
+ },
+)
+
+
+# The WaitForConsistencyRequest object is not a real proto. It is a wrapper
+# class intended for the handwritten method wait_for_consistency. It is
+# constructed by extending a Proto Plus message class to get a developer
+# experience closest to that of an autogenerated GAPIC method, and to allow
+# developers to manipulate the wrapper class like they would a request proto
+# for an autogenerated call.
+class WaitForConsistencyRequest(proto.Message):
+ """Wrapper class for encapsulating parameters for the `wait_for_consistency` method in both
+ :class:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.client.BigtableTableAdminClient`
+ and :class:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.async_client.BigtableTableAdminAsyncClient`.
+
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ name (str):
+ Required. The unique name of the Table for which to check
+ replication consistency. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+ standard_read_remote_writes (google.cloud.bigtable_admin_v2.types.StandardReadRemoteWrites):
+ Checks that reads using an app profile with
+ ``StandardIsolation`` can see all writes committed before
+ the token was created, even if the read and write target
+ different clusters.
+
+ This field is a member of `oneof`_ ``mode``.
+ data_boost_read_local_writes (google.cloud.bigtable_admin_v2.types.DataBoostReadLocalWrites):
+ Checks that reads using an app profile with
+ ``DataBoostIsolationReadOnly`` can see all writes committed
+ before the token was created, but only if the read and write
+ target the same cluster.
+
+ This field is a member of `oneof`_ ``mode``.
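+
+ A minimal construction sketch (the table name is a placeholder; ``StandardReadRemoteWrites``
+ is assumed importable from ``google.cloud.bigtable_admin_v2.types``, per the attribute types
+ above):
+
+ .. code-block:: python
+
+ from google.cloud.bigtable_admin_v2.types import StandardReadRemoteWrites
+
+ request = WaitForConsistencyRequest(
+ name="projects/my-project/instances/my-instance/tables/my-table",
+ standard_read_remote_writes=StandardReadRemoteWrites(),
+ )
+ # Only one member of the ``mode`` oneof can be set at a time.
+ assert request._pb.WhichOneof("mode") == "standard_read_remote_writes"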
+ """
+
+ name: str = proto.Field(proto.STRING, number=1)
+ standard_read_remote_writes: bigtable_table_admin.StandardReadRemoteWrites = (
+ proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="mode",
+ message=bigtable_table_admin.StandardReadRemoteWrites,
+ )
+ )
+ data_boost_read_local_writes: bigtable_table_admin.DataBoostReadLocalWrites = (
+ proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="mode",
+ message=bigtable_table_admin.DataBoostReadLocalWrites,
+ )
+ )
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
index b150b7123..632496543 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
@@ -354,6 +354,41 @@ async def create_instance(
scaled. If cluster_config.cluster_autoscaling_config is
non-empty, then autoscaling is enabled.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ instance = bigtable_admin_v2.Instance()
+ instance.display_name = "display_name_value"
+
+ request = bigtable_admin_v2.CreateInstanceRequest(
+ parent="parent_value",
+ instance_id="instance_id_value",
+ instance=instance,
+ )
+
+ # Make the request
+ operation = client.create_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]]):
The request object. Request message for
@@ -487,6 +522,32 @@ async def get_instance(
) -> instance.Instance:
r"""Gets information about an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetInstanceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_instance(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]]):
The request object. Request message for
@@ -578,6 +639,32 @@ async def list_instances(
) -> bigtable_instance_admin.ListInstancesResponse:
r"""Lists information about instances in a project.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_instances():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListInstancesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = await client.list_instances(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]]):
The request object. Request message for
@@ -666,6 +753,32 @@ async def update_instance(
To update other Instance properties, such as labels, use
PartialUpdateInstance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.Instance(
+ display_name="display_name_value",
+ )
+
+ # Make the request
+ response = await client.update_instance(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.Instance, dict]]):
The request object. A collection of Bigtable
@@ -739,6 +852,39 @@ async def partial_update_instance(
method can modify all fields of an Instance and is the
preferred way to update an Instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_partial_update_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ instance = bigtable_admin_v2.Instance()
+ instance.display_name = "display_name_value"
+
+ request = bigtable_admin_v2.PartialUpdateInstanceRequest(
+ instance=instance,
+ )
+
+ # Make the request
+ operation = client.partial_update_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]]):
The request object. Request message for
@@ -853,6 +999,29 @@ async def delete_instance(
) -> None:
r"""Delete an instance from a project.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteInstanceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_instance(request=request)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]]):
The request object. Request message for
@@ -940,6 +1109,37 @@ async def create_cluster(
scaled. If cluster_config.cluster_autoscaling_config is
non-empty, then autoscaling is enabled.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateClusterRequest(
+ parent="parent_value",
+ cluster_id="cluster_id_value",
+ )
+
+ # Make the request
+ operation = client.create_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]]):
The request object. Request message for
@@ -1060,6 +1260,32 @@ async def get_cluster(
) -> instance.Cluster:
r"""Gets information about a cluster.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetClusterRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_cluster(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]]):
The request object. Request message for
@@ -1150,6 +1376,32 @@ async def list_clusters(
) -> bigtable_instance_admin.ListClustersResponse:
r"""Lists information about clusters in an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_clusters():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListClustersRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = await client.list_clusters(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]]):
The request object. Request message for
@@ -1241,6 +1493,35 @@ async def update_cluster(
cluster_config.cluster_autoscaling_config. In order to update
it, you must use PartialUpdateCluster.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.Cluster(
+ )
+
+ # Make the request
+ operation = client.update_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]]):
The request object. A resizable group of nodes in a particular cloud
@@ -1332,6 +1613,35 @@ async def partial_update_cluster(
cluster_config.cluster_autoscaling_config, and explicitly set a
serve_node count via the update_mask.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_partial_update_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.PartialUpdateClusterRequest(
+ )
+
+ # Make the request
+ operation = client.partial_update_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]]):
The request object. Request message for
@@ -1442,6 +1752,29 @@ async def delete_cluster(
) -> None:
r"""Deletes a cluster from an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteClusterRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_cluster(request=request)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]]):
The request object. Request message for
@@ -1523,6 +1856,37 @@ async def create_app_profile(
) -> instance.AppProfile:
r"""Creates an app profile within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ app_profile = bigtable_admin_v2.AppProfile()
+ app_profile.priority = "PRIORITY_HIGH"
+
+ request = bigtable_admin_v2.CreateAppProfileRequest(
+ parent="parent_value",
+ app_profile_id="app_profile_id_value",
+ app_profile=app_profile,
+ )
+
+ # Make the request
+ response = await client.create_app_profile(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]]):
The request object. Request message for
@@ -1632,6 +1996,32 @@ async def get_app_profile(
) -> instance.AppProfile:
r"""Gets information about an app profile.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetAppProfileRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_app_profile(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]]):
The request object. Request message for
@@ -1721,6 +2111,33 @@ async def list_app_profiles(
) -> pagers.ListAppProfilesAsyncPager:
r"""Lists information about app profiles in an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_app_profiles():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListAppProfilesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_app_profiles(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]]):
The request object. Request message for
@@ -1827,6 +2244,39 @@ async def update_app_profile(
) -> operation_async.AsyncOperation:
r"""Updates an app profile within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ app_profile = bigtable_admin_v2.AppProfile()
+ app_profile.priority = "PRIORITY_HIGH"
+
+ request = bigtable_admin_v2.UpdateAppProfileRequest(
+ app_profile=app_profile,
+ )
+
+ # Make the request
+ operation = client.update_app_profile(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]]):
The request object. Request message for
@@ -1937,6 +2387,30 @@ async def delete_app_profile(
) -> None:
r"""Deletes an app profile from an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteAppProfileRequest(
+ name="name_value",
+ ignore_warnings=True,
+ )
+
+ # Make the request
+ await client.delete_app_profile(request=request)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]]):
The request object. Request message for
@@ -2025,6 +2499,33 @@ async def get_iam_policy(
resource. Returns an empty policy if an instance exists
but does not have a policy set.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_get_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]):
The request object. Request message for ``GetIamPolicy`` method.
@@ -2064,19 +2565,19 @@ async def get_iam_policy(
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -2137,6 +2638,33 @@ async def set_iam_policy(
r"""Sets the access control policy on an instance
resource. Replaces any existing policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_set_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]):
The request object. Request message for ``SetIamPolicy`` method.
@@ -2176,19 +2704,19 @@ async def set_iam_policy(
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -2250,6 +2778,34 @@ async def test_iam_permissions(
r"""Returns permissions that the caller has on the
specified instance resource.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_test_iam_permissions():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = await client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]):
The request object. Request message for ``TestIamPermissions`` method.
@@ -2345,6 +2901,33 @@ async def list_hot_tablets(
r"""Lists hot tablets in a cluster, within the time range
provided. Hot tablets are ordered based on CPU usage.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_hot_tablets():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListHotTabletsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_hot_tablets(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]]):
The request object. Request message for
@@ -2449,6 +3032,41 @@ async def create_logical_view(
) -> operation_async.AsyncOperation:
r"""Creates a logical view within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ logical_view = bigtable_admin_v2.LogicalView()
+ logical_view.query = "query_value"
+
+ request = bigtable_admin_v2.CreateLogicalViewRequest(
+ parent="parent_value",
+ logical_view_id="logical_view_id_value",
+ logical_view=logical_view,
+ )
+
+ # Make the request
+ operation = client.create_logical_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest, dict]]):
The request object. Request message for
@@ -2567,6 +3185,32 @@ async def get_logical_view(
) -> instance.LogicalView:
r"""Gets information about a logical view.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetLogicalViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_logical_view(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest, dict]]):
The request object. Request message for
@@ -2655,6 +3299,33 @@ async def list_logical_views(
) -> pagers.ListLogicalViewsAsyncPager:
r"""Lists information about logical views in an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_logical_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListLogicalViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_logical_views(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest, dict]]):
The request object. Request message for
@@ -2758,6 +3429,39 @@ async def update_logical_view(
) -> operation_async.AsyncOperation:
r"""Updates a logical view within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ logical_view = bigtable_admin_v2.LogicalView()
+ logical_view.query = "query_value"
+
+ request = bigtable_admin_v2.UpdateLogicalViewRequest(
+ logical_view=logical_view,
+ )
+
+ # Make the request
+ operation = client.update_logical_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest, dict]]):
The request object. Request message for
@@ -2871,6 +3575,29 @@ async def delete_logical_view(
) -> None:
r"""Deletes a logical view from an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteLogicalViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_logical_view(request=request)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest, dict]]):
The request object. Request message for
@@ -2952,6 +3679,41 @@ async def create_materialized_view(
) -> operation_async.AsyncOperation:
r"""Creates a materialized view within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ materialized_view = bigtable_admin_v2.MaterializedView()
+ materialized_view.query = "query_value"
+
+ request = bigtable_admin_v2.CreateMaterializedViewRequest(
+ parent="parent_value",
+ materialized_view_id="materialized_view_id_value",
+ materialized_view=materialized_view,
+ )
+
+ # Make the request
+ operation = client.create_materialized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest, dict]]):
The request object. Request message for
@@ -3074,6 +3836,32 @@ async def get_materialized_view(
) -> instance.MaterializedView:
r"""Gets information about a materialized view.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetMaterializedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_materialized_view(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest, dict]]):
The request object. Request message for
@@ -3163,6 +3951,33 @@ async def list_materialized_views(
r"""Lists information about materialized views in an
instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_materialized_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListMaterializedViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_materialized_views(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest, dict]]):
The request object. Request message for
@@ -3268,6 +4083,39 @@ async def update_materialized_view(
) -> operation_async.AsyncOperation:
r"""Updates a materialized view within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ materialized_view = bigtable_admin_v2.MaterializedView()
+ materialized_view.query = "query_value"
+
+ request = bigtable_admin_v2.UpdateMaterializedViewRequest(
+ materialized_view=materialized_view,
+ )
+
+ # Make the request
+ operation = client.update_materialized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest, dict]]):
The request object. Request message for
@@ -3383,6 +4231,29 @@ async def delete_materialized_view(
) -> None:
r"""Deletes a materialized view from an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteMaterializedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_materialized_view(request=request)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest, dict]]):
The request object. Request message for
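The async samples added in this file define coroutine functions but never show how to drive them; a minimal driver sketch, assuming default application credentials and a hypothetical instance name:

```python
import asyncio

from google.cloud import bigtable_admin_v2


async def main():
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
    # Hypothetical resource name; replace with a real project/instance.
    request = bigtable_admin_v2.GetInstanceRequest(
        name="projects/my-project/instances/my-instance",
    )
    response = await client.get_instance(request=request)
    print(response)


if __name__ == "__main__":
    asyncio.run(main())
```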
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
index accaa1e03..9d64108bb 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
@@ -161,6 +161,34 @@ def _get_default_mtls_endpoint(api_endpoint):
_DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}"
_DEFAULT_UNIVERSE = "googleapis.com"
+ @staticmethod
+ def _use_client_cert_effective():
+ """Returns whether client certificate should be used for mTLS if the
+ google-auth version supports should_use_client_cert automatic mTLS enablement.
+
+ Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var.
+
+ Returns:
+ bool: whether client certificate should be used for mTLS
+ Raises:
+ ValueError: (If using a version of google-auth without should_use_client_cert and
+ GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.)
+ """
+ # check if google-auth version supports should_use_client_cert for automatic mTLS enablement
+ if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER
+ return mtls.should_use_client_cert()
+ else: # pragma: NO COVER
+ # if unsupported, fall back to reading the env var
+ use_client_cert_str = os.getenv(
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
+ ).lower()
+ if use_client_cert_str not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
+ " either `true` or `false`"
+ )
+ return use_client_cert_str == "true"
+
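A quick sketch of the fallback this new helper implements; the env value below is illustrative, and the fallback branch is only reached when the installed google-auth does not expose mtls.should_use_client_cert:

```python
import os

from google.cloud import bigtable_admin_v2

# Illustrative value; must be "true" or "false" or the helper raises ValueError.
os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"

use_cert = bigtable_admin_v2.BigtableInstanceAdminClient._use_client_cert_effective()
print(use_cert)  # True via the env-var fallback; otherwise whatever
                 # mtls.should_use_client_cert() reports.
```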
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
@@ -503,12 +531,8 @@ def get_mtls_endpoint_and_cert_source(
)
if client_options is None:
client_options = client_options_lib.ClientOptions()
- use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_client_cert = BigtableInstanceAdminClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
@@ -516,7 +540,7 @@ def get_mtls_endpoint_and_cert_source(
# Figure out the client cert source to use.
client_cert_source = None
- if use_client_cert == "true":
+ if use_client_cert:
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
@@ -548,20 +572,14 @@ def _read_environment_variables():
google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
is not any of ["auto", "never", "always"].
"""
- use_client_cert = os.getenv(
- "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
- ).lower()
+ use_client_cert = BigtableInstanceAdminClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
+ return use_client_cert, use_mtls_endpoint, universe_domain_env
@staticmethod
def _get_client_cert_source(provided_cert_source, use_cert_flag):
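With the helper in place, _read_environment_variables() hands back an already-parsed boolean instead of the raw env-var string; a sketch of the new return shape, calling the private method purely for illustration:

```python
from google.cloud import bigtable_admin_v2

use_client_cert, use_mtls_endpoint, universe_domain = (
    bigtable_admin_v2.BigtableInstanceAdminClient._read_environment_variables()
)

assert isinstance(use_client_cert, bool)                 # parsed via _use_client_cert_effective()
assert use_mtls_endpoint in ("auto", "never", "always")  # still validated as before
print(universe_domain)  # None unless GOOGLE_CLOUD_UNIVERSE_DOMAIN is set
```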
@@ -906,6 +924,41 @@ def create_instance(
scaled. If cluster_config.cluster_autoscaling_config is
non-empty, then autoscaling is enabled.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_create_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ instance = bigtable_admin_v2.Instance()
+ instance.display_name = "display_name_value"
+
+ request = bigtable_admin_v2.CreateInstanceRequest(
+ parent="parent_value",
+ instance_id="instance_id_value",
+ instance=instance,
+ )
+
+ # Make the request
+ operation = client.create_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]):
The request object. Request message for
@@ -1035,6 +1088,32 @@ def get_instance(
) -> instance.Instance:
r"""Gets information about an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_get_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetInstanceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_instance(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]):
The request object. Request message for
@@ -1123,6 +1202,32 @@ def list_instances(
) -> bigtable_instance_admin.ListInstancesResponse:
r"""Lists information about instances in a project.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_list_instances():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListInstancesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.list_instances(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]):
The request object. Request message for
@@ -1208,6 +1313,32 @@ def update_instance(
To update other Instance properties, such as labels, use
PartialUpdateInstance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_update_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.Instance(
+ display_name="display_name_value",
+ )
+
+ # Make the request
+ response = client.update_instance(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.Instance, dict]):
The request object. A collection of Bigtable
@@ -1279,6 +1410,39 @@ def partial_update_instance(
method can modify all fields of an Instance and is the
preferred way to update an Instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_partial_update_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ instance = bigtable_admin_v2.Instance()
+ instance.display_name = "display_name_value"
+
+ request = bigtable_admin_v2.PartialUpdateInstanceRequest(
+ instance=instance,
+ )
+
+ # Make the request
+ operation = client.partial_update_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]):
The request object. Request message for
@@ -1390,6 +1554,29 @@ def delete_instance(
) -> None:
r"""Delete an instance from a project.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_delete_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteInstanceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_instance(request=request)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]):
The request object. Request message for
@@ -1474,6 +1661,37 @@ def create_cluster(
scaled. If cluster_config.cluster_autoscaling_config is
non-empty, then autoscaling is enabled.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_create_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateClusterRequest(
+ parent="parent_value",
+ cluster_id="cluster_id_value",
+ )
+
+ # Make the request
+ operation = client.create_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]):
The request object. Request message for
@@ -1591,6 +1809,32 @@ def get_cluster(
) -> instance.Cluster:
r"""Gets information about a cluster.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_get_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetClusterRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_cluster(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]):
The request object. Request message for
@@ -1678,6 +1922,32 @@ def list_clusters(
) -> bigtable_instance_admin.ListClustersResponse:
r"""Lists information about clusters in an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_list_clusters():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListClustersRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.list_clusters(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]):
The request object. Request message for
@@ -1766,6 +2036,35 @@ def update_cluster(
cluster_config.cluster_autoscaling_config. In order to update
it, you must use PartialUpdateCluster.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_update_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.Cluster(
+ )
+
+ # Make the request
+ operation = client.update_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]):
The request object. A resizable group of nodes in a particular cloud
@@ -1855,6 +2154,35 @@ def partial_update_cluster(
cluster_config.cluster_autoscaling_config, and explicitly set a
serve_node count via the update_mask.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_partial_update_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.PartialUpdateClusterRequest(
+ )
+
+ # Make the request
+ operation = client.partial_update_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]):
The request object. Request message for
@@ -1962,6 +2290,29 @@ def delete_cluster(
) -> None:
r"""Deletes a cluster from an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_delete_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteClusterRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_cluster(request=request)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]):
The request object. Request message for
@@ -2040,6 +2391,37 @@ def create_app_profile(
) -> instance.AppProfile:
r"""Creates an app profile within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_create_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ app_profile = bigtable_admin_v2.AppProfile()
+ app_profile.priority = "PRIORITY_HIGH"
+
+ request = bigtable_admin_v2.CreateAppProfileRequest(
+ parent="parent_value",
+ app_profile_id="app_profile_id_value",
+ app_profile=app_profile,
+ )
+
+ # Make the request
+ response = client.create_app_profile(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]):
The request object. Request message for
@@ -2146,6 +2528,32 @@ def get_app_profile(
) -> instance.AppProfile:
r"""Gets information about an app profile.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_get_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetAppProfileRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_app_profile(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]):
The request object. Request message for
@@ -2232,6 +2640,33 @@ def list_app_profiles(
) -> pagers.ListAppProfilesPager:
r"""Lists information about app profiles in an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_list_app_profiles():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListAppProfilesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_app_profiles(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]):
The request object. Request message for
@@ -2335,6 +2770,39 @@ def update_app_profile(
) -> operation.Operation:
r"""Updates an app profile within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_update_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ app_profile = bigtable_admin_v2.AppProfile()
+ app_profile.priority = "PRIORITY_HIGH"
+
+ request = bigtable_admin_v2.UpdateAppProfileRequest(
+ app_profile=app_profile,
+ )
+
+ # Make the request
+ operation = client.update_app_profile(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]):
The request object. Request message for
@@ -2442,6 +2910,30 @@ def delete_app_profile(
) -> None:
r"""Deletes an app profile from an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_delete_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteAppProfileRequest(
+ name="name_value",
+ ignore_warnings=True,
+ )
+
+ # Make the request
+ client.delete_app_profile(request=request)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]):
The request object. Request message for
@@ -2527,6 +3019,33 @@ def get_iam_policy(
resource. Returns an empty policy if an instance exists
but does not have a policy set.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_get_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
The request object. Request message for ``GetIamPolicy`` method.
@@ -2566,19 +3085,19 @@ def get_iam_policy(
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -2640,6 +3159,33 @@ def set_iam_policy(
r"""Sets the access control policy on an instance
resource. Replaces any existing policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_set_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
The request object. Request message for ``SetIamPolicy`` method.
@@ -2679,19 +3225,19 @@ def set_iam_policy(
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -2754,6 +3300,34 @@ def test_iam_permissions(
r"""Returns permissions that the caller has on the
specified instance resource.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_test_iam_permissions():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
The request object. Request message for ``TestIamPermissions`` method.
@@ -2850,6 +3424,33 @@ def list_hot_tablets(
r"""Lists hot tablets in a cluster, within the time range
provided. Hot tablets are ordered based on CPU usage.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_list_hot_tablets():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListHotTabletsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_hot_tablets(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]):
The request object. Request message for
@@ -2951,6 +3552,41 @@ def create_logical_view(
) -> operation.Operation:
r"""Creates a logical view within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_create_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ logical_view = bigtable_admin_v2.LogicalView()
+ logical_view.query = "query_value"
+
+ request = bigtable_admin_v2.CreateLogicalViewRequest(
+ parent="parent_value",
+ logical_view_id="logical_view_id_value",
+ logical_view=logical_view,
+ )
+
+ # Make the request
+ operation = client.create_logical_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest, dict]):
The request object. Request message for
@@ -3066,6 +3702,32 @@ def get_logical_view(
) -> instance.LogicalView:
r"""Gets information about a logical view.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_get_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetLogicalViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_logical_view(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest, dict]):
The request object. Request message for
@@ -3151,6 +3813,33 @@ def list_logical_views(
) -> pagers.ListLogicalViewsPager:
r"""Lists information about logical views in an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_list_logical_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListLogicalViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_logical_views(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest, dict]):
The request object. Request message for
@@ -3251,6 +3940,39 @@ def update_logical_view(
) -> operation.Operation:
r"""Updates a logical view within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_update_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ logical_view = bigtable_admin_v2.LogicalView()
+ logical_view.query = "query_value"
+
+ request = bigtable_admin_v2.UpdateLogicalViewRequest(
+ logical_view=logical_view,
+ )
+
+ # Make the request
+ operation = client.update_logical_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest, dict]):
The request object. Request message for
@@ -3361,6 +4083,29 @@ def delete_logical_view(
) -> None:
r"""Deletes a logical view from an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_delete_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteLogicalViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_logical_view(request=request)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest, dict]):
The request object. Request message for
@@ -3439,6 +4184,41 @@ def create_materialized_view(
) -> operation.Operation:
r"""Creates a materialized view within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_create_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ materialized_view = bigtable_admin_v2.MaterializedView()
+ materialized_view.query = "query_value"
+
+ request = bigtable_admin_v2.CreateMaterializedViewRequest(
+ parent="parent_value",
+ materialized_view_id="materialized_view_id_value",
+ materialized_view=materialized_view,
+ )
+
+ # Make the request
+ operation = client.create_materialized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest, dict]):
The request object. Request message for
@@ -3558,6 +4338,32 @@ def get_materialized_view(
) -> instance.MaterializedView:
r"""Gets information about a materialized view.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_get_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetMaterializedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_materialized_view(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest, dict]):
The request object. Request message for
@@ -3644,6 +4450,33 @@ def list_materialized_views(
r"""Lists information about materialized views in an
instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_list_materialized_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListMaterializedViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_materialized_views(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest, dict]):
The request object. Request message for
@@ -3746,6 +4579,39 @@ def update_materialized_view(
) -> operation.Operation:
r"""Updates a materialized view within an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_update_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ materialized_view = bigtable_admin_v2.MaterializedView()
+ materialized_view.query = "query_value"
+
+ request = bigtable_admin_v2.UpdateMaterializedViewRequest(
+ materialized_view=materialized_view,
+ )
+
+ # Make the request
+ operation = client.update_materialized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest, dict]):
The request object. Request message for
@@ -3858,6 +4724,29 @@ def delete_materialized_view(
) -> None:
r"""Deletes a materialized view from an instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_delete_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteMaterializedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_materialized_view(request=request)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
index f5ceeeb68..3a05dd663 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
@@ -81,9 +81,10 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
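The hunk above (and the matching ones in the gRPC, gRPC-asyncio, and REST transports that follow) marks ``credentials_file`` as deprecated and slated for removal in the next major version. A minimal sketch of the migration path, assuming a placeholder service-account path, is to load the credentials yourself and pass a ``credentials`` object instead:

.. code-block:: python

    from google.auth import load_credentials_from_file
    from google.cloud import bigtable_admin_v2

    # "service-account.json" is a placeholder path; load_credentials_from_file
    # returns a (credentials, project_id) tuple.
    credentials, project_id = load_credentials_from_file("service-account.json")

    # Pass the loaded credentials directly instead of credentials_file=...
    client = bigtable_admin_v2.BigtableInstanceAdminClient(credentials=credentials)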
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
index a294144ef..d5d5cf1e5 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
@@ -160,9 +160,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if a ``channel`` instance is provided.
channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
@@ -296,9 +297,10 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
index aae0f44c4..7ce762764 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
@@ -157,8 +157,9 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -209,9 +210,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py
index 12af0792b..9879c4c45 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py
@@ -1719,9 +1719,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if ``channel`` is provided. This argument will be
+ removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py
index cd916a2c8..c5e8544d6 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py
@@ -13,10 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-from .client import BigtableTableAdminClient
-from .async_client import BigtableTableAdminAsyncClient
+from .client import BaseBigtableTableAdminClient
+from .async_client import BaseBigtableTableAdminAsyncClient
__all__ = (
- "BigtableTableAdminClient",
- "BigtableTableAdminAsyncClient",
+ "BaseBigtableTableAdminClient",
+ "BaseBigtableTableAdminAsyncClient",
)
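With the export rename above, the GAPIC table-admin clients are now published under ``Base``-prefixed names from the same service package. A minimal sketch of importing and constructing the renamed synchronous client (assuming application-default credentials) would be:

.. code-block:: python

    from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
        BaseBigtableTableAdminClient,
    )

    # Constructed the same way as before the rename; credentials are picked up
    # from the environment if none are passed explicitly.
    client = BaseBigtableTableAdminClient()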
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
index 1bf544db6..7f772c87c 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
@@ -58,7 +58,7 @@
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport
-from .client import BigtableTableAdminClient
+from .client import BaseBigtableTableAdminClient
try:
from google.api_core import client_logging # type: ignore
@@ -70,7 +70,7 @@
_LOGGER = std_logging.getLogger(__name__)
-class BigtableTableAdminAsyncClient:
+class BaseBigtableTableAdminAsyncClient:
"""Service for creating, configuring, and deleting Cloud
Bigtable tables.
@@ -78,58 +78,66 @@ class BigtableTableAdminAsyncClient:
within the tables.
"""
- _client: BigtableTableAdminClient
+ _client: BaseBigtableTableAdminClient
# Copy defaults from the synchronous client for use here.
# Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
- DEFAULT_ENDPOINT = BigtableTableAdminClient.DEFAULT_ENDPOINT
- DEFAULT_MTLS_ENDPOINT = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT
- _DEFAULT_ENDPOINT_TEMPLATE = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE
- _DEFAULT_UNIVERSE = BigtableTableAdminClient._DEFAULT_UNIVERSE
+ DEFAULT_ENDPOINT = BaseBigtableTableAdminClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT
+ _DEFAULT_ENDPOINT_TEMPLATE = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE
+ _DEFAULT_UNIVERSE = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE
- authorized_view_path = staticmethod(BigtableTableAdminClient.authorized_view_path)
+ authorized_view_path = staticmethod(
+ BaseBigtableTableAdminClient.authorized_view_path
+ )
parse_authorized_view_path = staticmethod(
- BigtableTableAdminClient.parse_authorized_view_path
+ BaseBigtableTableAdminClient.parse_authorized_view_path
)
- backup_path = staticmethod(BigtableTableAdminClient.backup_path)
- parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path)
- cluster_path = staticmethod(BigtableTableAdminClient.cluster_path)
- parse_cluster_path = staticmethod(BigtableTableAdminClient.parse_cluster_path)
+ backup_path = staticmethod(BaseBigtableTableAdminClient.backup_path)
+ parse_backup_path = staticmethod(BaseBigtableTableAdminClient.parse_backup_path)
+ cluster_path = staticmethod(BaseBigtableTableAdminClient.cluster_path)
+ parse_cluster_path = staticmethod(BaseBigtableTableAdminClient.parse_cluster_path)
crypto_key_version_path = staticmethod(
- BigtableTableAdminClient.crypto_key_version_path
+ BaseBigtableTableAdminClient.crypto_key_version_path
)
parse_crypto_key_version_path = staticmethod(
- BigtableTableAdminClient.parse_crypto_key_version_path
+ BaseBigtableTableAdminClient.parse_crypto_key_version_path
+ )
+ instance_path = staticmethod(BaseBigtableTableAdminClient.instance_path)
+ parse_instance_path = staticmethod(BaseBigtableTableAdminClient.parse_instance_path)
+ schema_bundle_path = staticmethod(BaseBigtableTableAdminClient.schema_bundle_path)
+ parse_schema_bundle_path = staticmethod(
+ BaseBigtableTableAdminClient.parse_schema_bundle_path
)
- instance_path = staticmethod(BigtableTableAdminClient.instance_path)
- parse_instance_path = staticmethod(BigtableTableAdminClient.parse_instance_path)
- snapshot_path = staticmethod(BigtableTableAdminClient.snapshot_path)
- parse_snapshot_path = staticmethod(BigtableTableAdminClient.parse_snapshot_path)
- table_path = staticmethod(BigtableTableAdminClient.table_path)
- parse_table_path = staticmethod(BigtableTableAdminClient.parse_table_path)
+ snapshot_path = staticmethod(BaseBigtableTableAdminClient.snapshot_path)
+ parse_snapshot_path = staticmethod(BaseBigtableTableAdminClient.parse_snapshot_path)
+ table_path = staticmethod(BaseBigtableTableAdminClient.table_path)
+ parse_table_path = staticmethod(BaseBigtableTableAdminClient.parse_table_path)
common_billing_account_path = staticmethod(
- BigtableTableAdminClient.common_billing_account_path
+ BaseBigtableTableAdminClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
- BigtableTableAdminClient.parse_common_billing_account_path
+ BaseBigtableTableAdminClient.parse_common_billing_account_path
)
- common_folder_path = staticmethod(BigtableTableAdminClient.common_folder_path)
+ common_folder_path = staticmethod(BaseBigtableTableAdminClient.common_folder_path)
parse_common_folder_path = staticmethod(
- BigtableTableAdminClient.parse_common_folder_path
+ BaseBigtableTableAdminClient.parse_common_folder_path
)
common_organization_path = staticmethod(
- BigtableTableAdminClient.common_organization_path
+ BaseBigtableTableAdminClient.common_organization_path
)
parse_common_organization_path = staticmethod(
- BigtableTableAdminClient.parse_common_organization_path
+ BaseBigtableTableAdminClient.parse_common_organization_path
)
- common_project_path = staticmethod(BigtableTableAdminClient.common_project_path)
+ common_project_path = staticmethod(BaseBigtableTableAdminClient.common_project_path)
parse_common_project_path = staticmethod(
- BigtableTableAdminClient.parse_common_project_path
+ BaseBigtableTableAdminClient.parse_common_project_path
+ )
+ common_location_path = staticmethod(
+ BaseBigtableTableAdminClient.common_location_path
)
- common_location_path = staticmethod(BigtableTableAdminClient.common_location_path)
parse_common_location_path = staticmethod(
- BigtableTableAdminClient.parse_common_location_path
+ BaseBigtableTableAdminClient.parse_common_location_path
)
@classmethod
@@ -143,9 +151,9 @@ def from_service_account_info(cls, info: dict, *args, **kwargs):
kwargs: Additional arguments to pass to the constructor.
Returns:
- BigtableTableAdminAsyncClient: The constructed client.
+ BaseBigtableTableAdminAsyncClient: The constructed client.
"""
- return BigtableTableAdminClient.from_service_account_info.__func__(BigtableTableAdminAsyncClient, info, *args, **kwargs) # type: ignore
+ return BaseBigtableTableAdminClient.from_service_account_info.__func__(BaseBigtableTableAdminAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
@@ -159,9 +167,9 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
kwargs: Additional arguments to pass to the constructor.
Returns:
- BigtableTableAdminAsyncClient: The constructed client.
+ BaseBigtableTableAdminAsyncClient: The constructed client.
"""
- return BigtableTableAdminClient.from_service_account_file.__func__(BigtableTableAdminAsyncClient, filename, *args, **kwargs) # type: ignore
+ return BaseBigtableTableAdminClient.from_service_account_file.__func__(BaseBigtableTableAdminAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@@ -199,7 +207,7 @@ def get_mtls_endpoint_and_cert_source(
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
- return BigtableTableAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
+ return BaseBigtableTableAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> BigtableTableAdminTransport:
@@ -229,7 +237,7 @@ def universe_domain(self) -> str:
"""
return self._client._universe_domain
- get_transport_class = BigtableTableAdminClient.get_transport_class
+ get_transport_class = BaseBigtableTableAdminClient.get_transport_class
def __init__(
self,
@@ -245,7 +253,7 @@ def __init__(
client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiates the bigtable table admin async client.
+ """Instantiates the base bigtable table admin async client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -294,7 +302,7 @@ def __init__(
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
- self._client = BigtableTableAdminClient(
+ self._client = BaseBigtableTableAdminClient(
credentials=credentials,
transport=transport,
client_options=client_options,
@@ -305,7 +313,7 @@ def __init__(
std_logging.DEBUG
): # pragma: NO COVER
_LOGGER.debug(
- "Created client `google.bigtable.admin_v2.BigtableTableAdminAsyncClient`.",
+ "Created client `google.bigtable.admin_v2.BaseBigtableTableAdminAsyncClient`.",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"universeDomain": getattr(
@@ -338,6 +346,33 @@ async def create_table(
The table can be created with a full set of initial
column families, specified in the request.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateTableRequest(
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ response = await client.create_table(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]]):
The request object. Request message for
@@ -457,6 +492,38 @@ async def create_table_from_snapshot(
recommended for production use. It is not subject to any
SLA or deprecation policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_table_from_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateTableFromSnapshotRequest(
+ parent="parent_value",
+ table_id="table_id_value",
+ source_snapshot="source_snapshot_value",
+ )
+
+ # Make the request
+ operation = client.create_table_from_snapshot(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]]):
The request object. Request message for
@@ -582,6 +649,33 @@ async def list_tables(
) -> pagers.ListTablesAsyncPager:
r"""Lists all tables served from a specified instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_tables():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListTablesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tables(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]]):
The request object. Request message for
@@ -682,6 +776,32 @@ async def get_table(
) -> table.Table:
r"""Gets metadata information about the specified table.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_table(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]]):
The request object. Request message for
@@ -771,6 +891,35 @@ async def update_table(
) -> operation_async.AsyncOperation:
r"""Updates a specified table.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UpdateTableRequest(
+ )
+
+ # Make the request
+ operation = client.update_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]]):
The request object. The request for
@@ -787,14 +936,14 @@ async def update_table(
specifying which fields (e.g. ``change_stream_config``)
in the ``table`` field should be updated. This mask is
relative to the ``table`` field, not to the request
- message. The wildcard (*) path is currently not
+ message. The wildcard (\*) path is currently not
supported. Currently UpdateTable is only supported for
the following fields:
- - ``change_stream_config``
- - ``change_stream_config.retention_period``
- - ``deletion_protection``
- - ``row_key_schema``
+ - ``change_stream_config``
+ - ``change_stream_config.retention_period``
+ - ``deletion_protection``
+ - ``row_key_schema``
If ``column_families`` is set in ``update_mask``, it
will return an UNIMPLEMENTED error.
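A short sketch of the ``update_mask`` constraints described above, using placeholder resource names and updating only ``deletion_protection``:

.. code-block:: python

    from google.cloud import bigtable_admin_v2
    from google.protobuf import field_mask_pb2

    # Placeholder table name; only deletion_protection is listed in the mask,
    # so other Table fields are ignored by UpdateTable.
    table = bigtable_admin_v2.Table(
        name="projects/my-project/instances/my-instance/tables/my-table",
        deletion_protection=True,
    )
    request = bigtable_admin_v2.UpdateTableRequest(
        table=table,
        update_mask=field_mask_pb2.FieldMask(paths=["deletion_protection"]),
    )
    # The request can then be passed to update_table() as in the generated
    # sample above.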
@@ -892,6 +1041,29 @@ async def delete_table(
r"""Permanently deletes a specified table and all of its
data.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_table(request=request)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]]):
The request object. Request message for
@@ -972,6 +1144,36 @@ async def undelete_table(
r"""Restores a specified table which was accidentally
deleted.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_undelete_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UndeleteTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.undelete_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]]):
The request object. Request message for
@@ -1073,6 +1275,37 @@ async def create_authorized_view(
) -> operation_async.AsyncOperation:
r"""Creates a new AuthorizedView in a table.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateAuthorizedViewRequest(
+ parent="parent_value",
+ authorized_view_id="authorized_view_id_value",
+ )
+
+ # Make the request
+ operation = client.create_authorized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest, dict]]):
The request object. The request for
@@ -1194,6 +1427,33 @@ async def list_authorized_views(
) -> pagers.ListAuthorizedViewsAsyncPager:
r"""Lists all AuthorizedViews from a specific table.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_authorized_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListAuthorizedViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_authorized_views(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest, dict]]):
The request object. Request message for
@@ -1296,6 +1556,32 @@ async def get_authorized_view(
) -> table.AuthorizedView:
r"""Gets information from a specified AuthorizedView.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetAuthorizedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_authorized_view(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest, dict]]):
The request object. Request message for
@@ -1389,6 +1675,35 @@ async def update_authorized_view(
) -> operation_async.AsyncOperation:
r"""Updates an AuthorizedView in a table.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UpdateAuthorizedViewRequest(
+ )
+
+ # Make the request
+ operation = client.update_authorized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest, dict]]):
The request object. The request for
@@ -1396,8 +1711,8 @@ async def update_authorized_view(
authorized_view (:class:`google.cloud.bigtable_admin_v2.types.AuthorizedView`):
Required. The AuthorizedView to update. The ``name`` in
``authorized_view`` is used to identify the
- AuthorizedView. AuthorizedView name must in this format
- projects//instances//tables//authorizedViews/
+ AuthorizedView. AuthorizedView name must be in this format:
+ ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``.
This corresponds to the ``authorized_view`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -1507,6 +1822,29 @@ async def delete_authorized_view(
) -> None:
r"""Permanently deletes a specified AuthorizedView.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteAuthorizedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_authorized_view(request=request)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest, dict]]):
The request object. Request message for
@@ -1595,6 +1933,32 @@ async def modify_column_families(
data requests received prior to that point may see a
table where only some modifications have taken effect.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_modify_column_families():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ModifyColumnFamiliesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.modify_column_families(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]]):
The request object. Request message for
@@ -1699,6 +2063,30 @@ async def drop_row_range(
rows in a table, or only those that match a particular
prefix.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_drop_row_range():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DropRowRangeRequest(
+ row_key_prefix=b'row_key_prefix_blob',
+ name="name_value",
+ )
+
+ # Make the request
+ await client.drop_row_range(request=request)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]]):
The request object. Request message for
@@ -1757,6 +2145,32 @@ async def generate_consistency_token(
been replicated. The tokens will be available for 90
days.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_generate_consistency_token():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GenerateConsistencyTokenRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.generate_consistency_token(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]]):
The request object. Request message for
@@ -1851,6 +2265,33 @@ async def check_consistency(
the conditions specified in the token and the check
request.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_check_consistency():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CheckConsistencyRequest(
+ name="name_value",
+ consistency_token="consistency_token_value",
+ )
+
+ # Make the request
+ response = await client.check_consistency(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]]):
The request object. Request message for
@@ -1960,6 +2401,38 @@ async def snapshot_table(
recommended for production use. It is not subject to any
SLA or deprecation policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_snapshot_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.SnapshotTableRequest(
+ name="name_value",
+ cluster="cluster_value",
+ snapshot_id="snapshot_id_value",
+ )
+
+ # Make the request
+ operation = client.snapshot_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]]):
The request object. Request message for
@@ -2107,6 +2580,32 @@ async def get_snapshot(
recommended for production use. It is not subject to any
SLA or deprecation policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetSnapshotRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_snapshot(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]]):
The request object. Request message for
@@ -2220,6 +2719,33 @@ async def list_snapshots(
recommended for production use. It is not subject to any
SLA or deprecation policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_snapshots():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListSnapshotsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_snapshots(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]]):
The request object. Request message for
@@ -2346,6 +2872,29 @@ async def delete_snapshot(
recommended for production use. It is not subject to any
SLA or deprecation policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteSnapshotRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_snapshot(request=request)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]]):
The request object. Request message for
@@ -2440,6 +2989,41 @@ async def create_backup(
Cancelling the returned operation will stop the creation and
delete the backup.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ backup = bigtable_admin_v2.Backup()
+ backup.source_table = "source_table_value"
+
+ request = bigtable_admin_v2.CreateBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ backup=backup,
+ )
+
+ # Make the request
+ operation = client.create_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]]):
The request object. The request for
@@ -2460,7 +3044,7 @@ async def create_backup(
full backup name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in
- length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -2560,6 +3144,32 @@ async def get_backup(
r"""Gets metadata on a pending or completed Cloud
Bigtable Backup.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_backup(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]]):
The request object. The request for
@@ -2644,6 +3254,35 @@ async def update_backup(
) -> table.Backup:
r"""Updates a pending or completed Cloud Bigtable Backup.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ backup = bigtable_admin_v2.Backup()
+ backup.source_table = "source_table_value"
+
+ request = bigtable_admin_v2.UpdateBackupRequest(
+ backup=backup,
+ )
+
+ # Make the request
+ response = await client.update_backup(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]]):
The request object. The request for
@@ -2654,7 +3293,7 @@ async def update_backup(
required. Other fields are ignored. Update is only
supported for the following fields:
- - ``backup.expire_time``.
+ - ``backup.expire_time``.
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -2747,6 +3386,29 @@ async def delete_backup(
) -> None:
r"""Deletes a pending or completed Cloud Bigtable backup.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_backup(request=request)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]]):
The request object. The request for
@@ -2825,6 +3487,33 @@ async def list_backups(
r"""Lists Cloud Bigtable backups. Returns both completed
and pending backups.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_backups():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListBackupsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backups(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]]):
The request object. The request for
@@ -2917,7 +3606,7 @@ async def list_backups(
# Done; return the response.
return response
- async def restore_table(
+ async def _restore_table(
self,
request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None,
*,
@@ -2934,6 +3623,38 @@ async def restore_table(
The [response][google.longrunning.Operation.response] type is
[Table][google.bigtable.admin.v2.Table], if successful.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_restore_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.RestoreTableRequest(
+ backup="backup_value",
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ operation = client._restore_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]]):
The request object. The request for
@@ -3011,6 +3732,38 @@ async def copy_backup(
destination cluster located in the destination instance
and project.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_copy_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CopyBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ )
+
+ # Make the request
+ operation = client.copy_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]):
The request object. The request for
@@ -3031,7 +3784,7 @@ async def copy_backup(
name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in
- length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -3148,10 +3901,37 @@ async def get_iam_policy(
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
- r"""Gets the access control policy for a Table or Backup
+ r"""Gets the access control policy for a Bigtable
resource. Returns an empty policy if the resource exists
but does not have a policy set.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_get_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]):
The request object. Request message for ``GetIamPolicy`` method.
@@ -3191,19 +3971,19 @@ async def get_iam_policy(
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -3261,9 +4041,36 @@ async def set_iam_policy(
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
- r"""Sets the access control policy on a Table or Backup
+ r"""Sets the access control policy on a Bigtable
resource. Replaces any existing policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_set_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]):
The request object. Request message for ``SetIamPolicy`` method.
@@ -3303,19 +4110,19 @@ async def set_iam_policy(
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -3375,7 +4182,35 @@ async def test_iam_permissions(
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that the caller has on the
- specified Table or Backup resource.
+ specified Bigtable resource.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ async def sample_test_iam_permissions():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = await client.test_iam_permissions(request=request)
+
+ # Handle the response
+                print(response)
+
Args:
request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]):
@@ -3458,7 +4293,656 @@ async def test_iam_permissions(
# Done; return the response.
return response
- async def __aenter__(self) -> "BigtableTableAdminAsyncClient":
+ async def create_schema_bundle(
+ self,
+ request: Optional[
+ Union[bigtable_table_admin.CreateSchemaBundleRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ schema_bundle_id: Optional[str] = None,
+ schema_bundle: Optional[table.SchemaBundle] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates a new schema bundle in the specified table.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_create_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ schema_bundle = bigtable_admin_v2.SchemaBundle()
+ schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
+
+ request = bigtable_admin_v2.CreateSchemaBundleRequest(
+ parent="parent_value",
+ schema_bundle_id="schema_bundle_id_value",
+ schema_bundle=schema_bundle,
+ )
+
+ # Make the request
+ operation = client.create_schema_bundle(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest, dict]]):
+ The request object. The request for
+ [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle].
+ parent (:class:`str`):
+ Required. The parent resource where this schema bundle
+ will be created. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ schema_bundle_id (:class:`str`):
+ Required. The unique ID to use for
+ the schema bundle, which will become the
+ final component of the schema bundle's
+ resource name.
+
+ This corresponds to the ``schema_bundle_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ schema_bundle (:class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`):
+ Required. The schema bundle to
+ create.
+
+ This corresponds to the ``schema_bundle`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`
+ A named collection of related schemas.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, schema_bundle_id, schema_bundle]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.CreateSchemaBundleRequest):
+ request = bigtable_table_admin.CreateSchemaBundleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if schema_bundle_id is not None:
+ request.schema_bundle_id = schema_bundle_id
+ if schema_bundle is not None:
+ request.schema_bundle = schema_bundle
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.create_schema_bundle
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ table.SchemaBundle,
+ metadata_type=bigtable_table_admin.CreateSchemaBundleMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
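
As a rough illustration of the request/flattened-argument rule enforced in the method above, here is a minimal sketch (outside this diff; the client, schema bundle, and resource names are placeholders):

    from google.cloud import bigtable_admin_v2

    async def create_bundle_two_ways(client, schema_bundle):
        parent = "projects/my-project/instances/my-instance/tables/my-table"
        # Option 1: pass a fully populated request object.
        operation = await client.create_schema_bundle(
            request=bigtable_admin_v2.CreateSchemaBundleRequest(
                parent=parent,
                schema_bundle_id="my-bundle",
                schema_bundle=schema_bundle,
            )
        )
        # Option 2: pass the flattened fields instead.
        # Mixing a request object with flattened fields raises ValueError.
        operation = await client.create_schema_bundle(
            parent=parent,
            schema_bundle_id="my-bundle",
            schema_bundle=schema_bundle,
        )
        return await operation.result()
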
+ async def update_schema_bundle(
+ self,
+ request: Optional[
+ Union[bigtable_table_admin.UpdateSchemaBundleRequest, dict]
+ ] = None,
+ *,
+ schema_bundle: Optional[table.SchemaBundle] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates a schema bundle in the specified table.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ schema_bundle = bigtable_admin_v2.SchemaBundle()
+ schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
+
+ request = bigtable_admin_v2.UpdateSchemaBundleRequest(
+ schema_bundle=schema_bundle,
+ )
+
+ # Make the request
+ operation = client.update_schema_bundle(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest, dict]]):
+ The request object. The request for
+ [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle].
+ schema_bundle (:class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`):
+ Required. The schema bundle to update.
+
+ The schema bundle's ``name`` field is used to identify
+ the schema bundle to update. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+
+ This corresponds to the ``schema_bundle`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Optional. The list of fields to
+ update.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`
+ A named collection of related schemas.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [schema_bundle, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.UpdateSchemaBundleRequest):
+ request = bigtable_table_admin.UpdateSchemaBundleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if schema_bundle is not None:
+ request.schema_bundle = schema_bundle
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.update_schema_bundle
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("schema_bundle.name", request.schema_bundle.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ table.SchemaBundle,
+ metadata_type=bigtable_table_admin.UpdateSchemaBundleMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_schema_bundle(
+ self,
+ request: Optional[
+ Union[bigtable_table_admin.GetSchemaBundleRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> table.SchemaBundle:
+ r"""Gets metadata information about the specified schema
+ bundle.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_get_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetSchemaBundleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_schema_bundle(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest, dict]]):
+ The request object. The request for
+ [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle].
+ name (:class:`str`):
+ Required. The unique name of the schema bundle to
+ retrieve. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.SchemaBundle:
+ A named collection of related
+ schemas.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.GetSchemaBundleRequest):
+ request = bigtable_table_admin.GetSchemaBundleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.get_schema_bundle
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_schema_bundles(
+ self,
+ request: Optional[
+ Union[bigtable_table_admin.ListSchemaBundlesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListSchemaBundlesAsyncPager:
+ r"""Lists all schema bundles associated with the
+ specified table.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_list_schema_bundles():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListSchemaBundlesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_schema_bundles(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest, dict]]):
+ The request object. The request for
+ [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles].
+ parent (:class:`str`):
+ Required. The parent, which owns this collection of
+ schema bundles. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesAsyncPager:
+ The response for
+ [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.ListSchemaBundlesRequest):
+ request = bigtable_table_admin.ListSchemaBundlesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.list_schema_bundles
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListSchemaBundlesAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_schema_bundle(
+ self,
+ request: Optional[
+ Union[bigtable_table_admin.DeleteSchemaBundleRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a schema bundle in the specified table.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_delete_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteSchemaBundleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_schema_bundle(request=request)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest, dict]]):
+ The request object. The request for
+ [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle].
+ name (:class:`str`):
+ Required. The unique name of the schema bundle to
+ delete. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.DeleteSchemaBundleRequest):
+ request = bigtable_table_admin.DeleteSchemaBundleRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._client._transport._wrapped_methods[
+ self._client._transport.delete_schema_bundle
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._client._validate_universe_domain()
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def __aenter__(self) -> "BaseBigtableTableAdminAsyncClient":
return self
async def __aexit__(self, exc_type, exc, tb):
@@ -3473,4 +4957,4 @@ async def __aexit__(self, exc_type, exc, tb):
DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
-__all__ = ("BigtableTableAdminAsyncClient",)
+__all__ = ("BaseBigtableTableAdminAsyncClient",)
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
index abb82b1ed..ce251db7d 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
@@ -78,7 +78,7 @@
from .transports.rest import BigtableTableAdminRestTransport
-class BigtableTableAdminClientMeta(type):
+class BaseBigtableTableAdminClientMeta(type):
"""Metaclass for the BigtableTableAdmin client.
This provides class-level methods for building and retrieving
@@ -115,7 +115,7 @@ def get_transport_class(
return next(iter(cls._transport_registry.values()))
-class BigtableTableAdminClient(metaclass=BigtableTableAdminClientMeta):
+class BaseBigtableTableAdminClient(metaclass=BaseBigtableTableAdminClientMeta):
"""Service for creating, configuring, and deleting Cloud
Bigtable tables.
@@ -162,6 +162,34 @@ def _get_default_mtls_endpoint(api_endpoint):
_DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}"
_DEFAULT_UNIVERSE = "googleapis.com"
+ @staticmethod
+ def _use_client_cert_effective():
+ """Returns whether client certificate should be used for mTLS if the
+ google-auth version supports should_use_client_cert automatic mTLS enablement.
+
+ Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var.
+
+ Returns:
+ bool: whether client certificate should be used for mTLS
+ Raises:
+                ValueError: If the installed google-auth version does not provide
+                should_use_client_cert and GOOGLE_API_USE_CLIENT_CERTIFICATE is set
+                to an unexpected value.
+ """
+ # check if google-auth version supports should_use_client_cert for automatic mTLS enablement
+ if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER
+ return mtls.should_use_client_cert()
+ else: # pragma: NO COVER
+ # if unsupported, fallback to reading from env var
+ use_client_cert_str = os.getenv(
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
+ ).lower()
+ if use_client_cert_str not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
+ " either `true` or `false`"
+ )
+ return use_client_cert_str == "true"
+
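
For reference, a minimal standalone sketch of the environment-variable fallback implemented above (outside this diff; only the standard library is assumed):

    import os

    def use_client_cert_from_env() -> bool:
        # Mirrors the fallback branch: the variable must be "true" or "false"
        # (case-insensitive); any other value is rejected.
        value = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false").lower()
        if value not in ("true", "false"):
            raise ValueError(
                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
                " either `true` or `false`"
            )
        return value == "true"

    # e.g. GOOGLE_API_USE_CLIENT_CERTIFICATE=true  -> returns True
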
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
@@ -173,7 +201,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs):
kwargs: Additional arguments to pass to the constructor.
Returns:
- BigtableTableAdminClient: The constructed client.
+ BaseBigtableTableAdminClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
@@ -191,7 +219,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
kwargs: Additional arguments to pass to the constructor.
Returns:
- BigtableTableAdminClient: The constructed client.
+ BaseBigtableTableAdminClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
@@ -322,6 +350,30 @@ def parse_instance_path(path: str) -> Dict[str, str]:
m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path)
return m.groupdict() if m else {}
+ @staticmethod
+ def schema_bundle_path(
+ project: str,
+ instance: str,
+ table: str,
+ schema_bundle: str,
+ ) -> str:
+ """Returns a fully-qualified schema_bundle string."""
+ return "projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}".format(
+ project=project,
+ instance=instance,
+ table=table,
+ schema_bundle=schema_bundle,
+ )
+
+ @staticmethod
+ def parse_schema_bundle_path(path: str) -> Dict[str, str]:
+ """Parses a schema_bundle path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P.+?)/schemaBundles/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
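
A small round-trip sketch of the schema bundle resource-name format handled by the two helpers above (outside this diff; the path segments are placeholders):

    import re

    path = "projects/p/instances/i/tables/t/schemaBundles/sb"
    m = re.match(
        r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)"
        r"/tables/(?P<table>.+?)/schemaBundles/(?P<schema_bundle>.+?)$",
        path,
    )
    assert m is not None
    assert m.groupdict() == {
        "project": "p",
        "instance": "i",
        "table": "t",
        "schema_bundle": "sb",
    }
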
@staticmethod
def snapshot_path(
project: str,
@@ -486,12 +538,8 @@ def get_mtls_endpoint_and_cert_source(
)
if client_options is None:
client_options = client_options_lib.ClientOptions()
- use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_client_cert = BaseBigtableTableAdminClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
@@ -499,7 +547,7 @@ def get_mtls_endpoint_and_cert_source(
# Figure out the client cert source to use.
client_cert_source = None
- if use_client_cert == "true":
+ if use_client_cert:
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
@@ -531,20 +579,14 @@ def _read_environment_variables():
google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
is not any of ["auto", "never", "always"].
"""
- use_client_cert = os.getenv(
- "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
- ).lower()
+ use_client_cert = BaseBigtableTableAdminClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
+ return use_client_cert, use_mtls_endpoint, universe_domain_env
@staticmethod
def _get_client_cert_source(provided_cert_source, use_cert_flag):
@@ -587,15 +629,17 @@ def _get_api_endpoint(
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
- _default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE
+ _default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE
if universe_domain != _default_universe:
raise MutualTLSChannelError(
f"mTLS is not supported in any universe other than {_default_universe}."
)
- api_endpoint = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT
+ api_endpoint = BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT
else:
- api_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
- UNIVERSE_DOMAIN=universe_domain
+ api_endpoint = (
+ BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=universe_domain
+ )
)
return api_endpoint
@@ -615,7 +659,7 @@ def _get_universe_domain(
Raises:
ValueError: If the universe domain is an empty string.
"""
- universe_domain = BigtableTableAdminClient._DEFAULT_UNIVERSE
+ universe_domain = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE
if client_universe_domain is not None:
universe_domain = client_universe_domain
elif universe_domain_env is not None:
@@ -696,7 +740,7 @@ def __init__(
client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
- """Instantiates the bigtable table admin client.
+ """Instantiates the base bigtable table admin client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
@@ -760,11 +804,11 @@ def __init__(
self._use_client_cert,
self._use_mtls_endpoint,
self._universe_domain_env,
- ) = BigtableTableAdminClient._read_environment_variables()
- self._client_cert_source = BigtableTableAdminClient._get_client_cert_source(
+ ) = BaseBigtableTableAdminClient._read_environment_variables()
+ self._client_cert_source = BaseBigtableTableAdminClient._get_client_cert_source(
self._client_options.client_cert_source, self._use_client_cert
)
- self._universe_domain = BigtableTableAdminClient._get_universe_domain(
+ self._universe_domain = BaseBigtableTableAdminClient._get_universe_domain(
universe_domain_opt, self._universe_domain_env
)
self._api_endpoint = None # updated below, depending on `transport`
@@ -803,7 +847,7 @@ def __init__(
self._api_endpoint = (
self._api_endpoint
- or BigtableTableAdminClient._get_api_endpoint(
+ or BaseBigtableTableAdminClient._get_api_endpoint(
self._client_options.api_endpoint,
self._client_cert_source,
self._universe_domain,
@@ -825,7 +869,7 @@ def __init__(
Type[BigtableTableAdminTransport],
Callable[..., BigtableTableAdminTransport],
] = (
- BigtableTableAdminClient.get_transport_class(transport)
+ BaseBigtableTableAdminClient.get_transport_class(transport)
if isinstance(transport, str) or transport is None
else cast(Callable[..., BigtableTableAdminTransport], transport)
)
@@ -847,7 +891,7 @@ def __init__(
std_logging.DEBUG
): # pragma: NO COVER
_LOGGER.debug(
- "Created client `google.bigtable.admin_v2.BigtableTableAdminClient`.",
+ "Created client `google.bigtable.admin_v2.BaseBigtableTableAdminClient`.",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"universeDomain": getattr(
@@ -880,6 +924,33 @@ def create_table(
The table can be created with a full set of initial
column families, specified in the request.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_create_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateTableRequest(
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ response = client.create_table(request=request)
+
+ # Handle the response
+ print(response)
+
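
Since the description above notes that initial column families can be supplied in the request, a hedged variant of the sample with one column family might look like this (outside this diff; names are placeholders):

    from google.cloud import bigtable_admin_v2

    request = bigtable_admin_v2.CreateTableRequest(
        parent="projects/my-project/instances/my-instance",
        table_id="my-table",
        table=bigtable_admin_v2.Table(
            column_families={"cf1": bigtable_admin_v2.ColumnFamily()},
        ),
    )
    # response = client.create_table(request=request)
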
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]):
The request object. Request message for
@@ -996,6 +1067,38 @@ def create_table_from_snapshot(
recommended for production use. It is not subject to any
SLA or deprecation policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_create_table_from_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateTableFromSnapshotRequest(
+ parent="parent_value",
+ table_id="table_id_value",
+ source_snapshot="source_snapshot_value",
+ )
+
+ # Make the request
+ operation = client.create_table_from_snapshot(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]):
The request object. Request message for
@@ -1120,6 +1223,33 @@ def list_tables(
) -> pagers.ListTablesPager:
r"""Lists all tables served from a specified instance.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_list_tables():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListTablesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tables(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]):
The request object. Request message for
@@ -1217,6 +1347,32 @@ def get_table(
) -> table.Table:
r"""Gets metadata information about the specified table.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_get_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_table(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]):
The request object. Request message for
@@ -1303,6 +1459,35 @@ def update_table(
) -> operation.Operation:
r"""Updates a specified table.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_update_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UpdateTableRequest(
+ )
+
+ # Make the request
+ operation = client.update_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]):
The request object. The request for
@@ -1319,14 +1504,14 @@ def update_table(
specifying which fields (e.g. ``change_stream_config``)
in the ``table`` field should be updated. This mask is
relative to the ``table`` field, not to the request
- message. The wildcard (*) path is currently not
+ message. The wildcard (\*) path is currently not
supported. Currently UpdateTable is only supported for
the following fields:
- - ``change_stream_config``
- - ``change_stream_config.retention_period``
- - ``deletion_protection``
- - ``row_key_schema``
+ - ``change_stream_config``
+ - ``change_stream_config.retention_period``
+ - ``deletion_protection``
+ - ``row_key_schema``
If ``column_families`` is set in ``update_mask``, it
will return an UNIMPLEMENTED error.
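
To make the mask usage concrete, a minimal sketch (outside this diff; resource names are placeholders) restricting an update to one of the supported fields listed above:

    from google.cloud import bigtable_admin_v2
    from google.protobuf import field_mask_pb2

    request = bigtable_admin_v2.UpdateTableRequest(
        table=bigtable_admin_v2.Table(
            name="projects/my-project/instances/my-instance/tables/my-table",
            deletion_protection=True,
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["deletion_protection"]),
    )
    # operation = client.update_table(request=request)
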
@@ -1421,6 +1606,29 @@ def delete_table(
r"""Permanently deletes a specified table and all of its
data.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_delete_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_table(request=request)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]):
The request object. Request message for
@@ -1498,6 +1706,36 @@ def undelete_table(
r"""Restores a specified table which was accidentally
deleted.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_undelete_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UndeleteTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.undelete_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]):
The request object. Request message for
@@ -1596,6 +1834,37 @@ def create_authorized_view(
) -> operation.Operation:
r"""Creates a new AuthorizedView in a table.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_create_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateAuthorizedViewRequest(
+ parent="parent_value",
+ authorized_view_id="authorized_view_id_value",
+ )
+
+ # Make the request
+ operation = client.create_authorized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest, dict]):
The request object. The request for
@@ -1714,6 +1983,33 @@ def list_authorized_views(
) -> pagers.ListAuthorizedViewsPager:
r"""Lists all AuthorizedViews from a specific table.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_list_authorized_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListAuthorizedViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_authorized_views(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest, dict]):
The request object. Request message for
@@ -1813,6 +2109,32 @@ def get_authorized_view(
) -> table.AuthorizedView:
r"""Gets information from a specified AuthorizedView.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_get_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetAuthorizedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_authorized_view(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest, dict]):
The request object. Request message for
@@ -1903,6 +2225,35 @@ def update_authorized_view(
) -> operation.Operation:
r"""Updates an AuthorizedView in a table.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_update_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UpdateAuthorizedViewRequest(
+ )
+
+ # Make the request
+ operation = client.update_authorized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest, dict]):
The request object. The request for
@@ -1910,8 +2261,8 @@ def update_authorized_view(
authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView):
Required. The AuthorizedView to update. The ``name`` in
``authorized_view`` is used to identify the
- AuthorizedView. AuthorizedView name must in this format
- projects//instances//tables//authorizedViews/
+          AuthorizedView. AuthorizedView name must be in this format:
+ ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``.
This corresponds to the ``authorized_view`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -2018,6 +2369,29 @@ def delete_authorized_view(
) -> None:
r"""Permanently deletes a specified AuthorizedView.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_delete_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteAuthorizedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_authorized_view(request=request)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest, dict]):
The request object. Request message for
@@ -2103,6 +2477,32 @@ def modify_column_families(
data requests received prior to that point may see a
table where only some modifications have taken effect.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_modify_column_families():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ModifyColumnFamiliesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.modify_column_families(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]):
The request object. Request message for
@@ -2204,6 +2604,30 @@ def drop_row_range(
rows in a table, or only those that match a particular
prefix.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_drop_row_range():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DropRowRangeRequest(
+ row_key_prefix=b'row_key_prefix_blob',
+ name="name_value",
+ )
+
+ # Make the request
+ client.drop_row_range(request=request)
+
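+            The request above drops rows by key prefix. A minimal sketch of the alternative
+            form that clears the whole table (assuming the ``delete_all_data_from_table``
+            field of ``DropRowRangeRequest``):
+
+            .. code-block:: python
+
+                request = bigtable_admin_v2.DropRowRangeRequest(
+                    name="name_value",
+                    delete_all_data_from_table=True,
+                )
+                client.drop_row_range(request=request)
+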
Args:
request (Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]):
The request object. Request message for
@@ -2260,6 +2684,32 @@ def generate_consistency_token(
been replicated. The tokens will be available for 90
days.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_generate_consistency_token():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GenerateConsistencyTokenRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.generate_consistency_token(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]):
The request object. Request message for
@@ -2353,6 +2803,33 @@ def check_consistency(
the conditions specified in the token and the check
request.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_check_consistency():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CheckConsistencyRequest(
+ name="name_value",
+ consistency_token="consistency_token_value",
+ )
+
+ # Make the request
+ response = client.check_consistency(request=request)
+
+ # Handle the response
+ print(response)
+
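+            A consistency check is normally paired with ``generate_consistency_token``.
+            A minimal sketch of that two-step workflow, polling until replication has
+            caught up (the one-second sleep is an arbitrary illustrative interval):
+
+            .. code-block:: python
+
+                import time
+
+                token = client.generate_consistency_token(
+                    request=bigtable_admin_v2.GenerateConsistencyTokenRequest(name="name_value")
+                ).consistency_token
+
+                while not client.check_consistency(
+                    request=bigtable_admin_v2.CheckConsistencyRequest(
+                        name="name_value",
+                        consistency_token=token,
+                    )
+                ).consistent:
+                    time.sleep(1)
+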
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]):
The request object. Request message for
@@ -2459,6 +2936,38 @@ def snapshot_table(
recommended for production use. It is not subject to any
SLA or deprecation policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_snapshot_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.SnapshotTableRequest(
+ name="name_value",
+ cluster="cluster_value",
+ snapshot_id="snapshot_id_value",
+ )
+
+ # Make the request
+ operation = client.snapshot_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]):
The request object. Request message for
@@ -2603,6 +3112,32 @@ def get_snapshot(
recommended for production use. It is not subject to any
SLA or deprecation policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_get_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetSnapshotRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_snapshot(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]):
The request object. Request message for
@@ -2713,6 +3248,33 @@ def list_snapshots(
recommended for production use. It is not subject to any
SLA or deprecation policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_list_snapshots():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListSnapshotsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_snapshots(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]):
The request object. Request message for
@@ -2836,6 +3398,29 @@ def delete_snapshot(
recommended for production use. It is not subject to any
SLA or deprecation policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_delete_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteSnapshotRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_snapshot(request=request)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]):
The request object. Request message for
@@ -2927,6 +3512,41 @@ def create_backup(
Cancelling the returned operation will stop the creation and
delete the backup.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_create_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ backup = bigtable_admin_v2.Backup()
+ backup.source_table = "source_table_value"
+
+ request = bigtable_admin_v2.CreateBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ backup=backup,
+ )
+
+ # Make the request
+ operation = client.create_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]):
The request object. The request for
@@ -2947,7 +3567,7 @@ def create_backup(
full backup name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in
- length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -3044,6 +3664,32 @@ def get_backup(
r"""Gets metadata on a pending or completed Cloud
Bigtable Backup.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_get_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_backup(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]):
The request object. The request for
@@ -3125,6 +3771,35 @@ def update_backup(
) -> table.Backup:
r"""Updates a pending or completed Cloud Bigtable Backup.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_update_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ backup = bigtable_admin_v2.Backup()
+ backup.source_table = "source_table_value"
+
+ request = bigtable_admin_v2.UpdateBackupRequest(
+ backup=backup,
+ )
+
+ # Make the request
+ response = client.update_backup(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]):
The request object. The request for
@@ -3135,7 +3810,7 @@ def update_backup(
required. Other fields are ignored. Update is only
supported for the following fields:
- - ``backup.expire_time``.
+ - ``backup.expire_time``.
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -3225,6 +3900,29 @@ def delete_backup(
) -> None:
r"""Deletes a pending or completed Cloud Bigtable backup.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_delete_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_backup(request=request)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]):
The request object. The request for
@@ -3300,6 +3998,33 @@ def list_backups(
r"""Lists Cloud Bigtable backups. Returns both completed
and pending backups.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_list_backups():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListBackupsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backups(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]):
The request object. The request for
@@ -3389,7 +4114,7 @@ def list_backups(
# Done; return the response.
return response
- def restore_table(
+ def _restore_table(
self,
request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None,
*,
@@ -3406,6 +4131,38 @@ def restore_table(
The [response][google.longrunning.Operation.response] type is
[Table][google.bigtable.admin.v2.Table], if successful.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_restore_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.RestoreTableRequest(
+ backup="backup_value",
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ operation = client._restore_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]):
The request object. The request for
@@ -3481,6 +4238,38 @@ def copy_backup(
destination cluster located in the destination instance
and project.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_copy_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CopyBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ )
+
+ # Make the request
+ operation = client.copy_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]):
The request object. The request for
@@ -3501,7 +4290,7 @@ def copy_backup(
name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in
- length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
This corresponds to the ``backup_id`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -3615,10 +4404,37 @@ def get_iam_policy(
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
- r"""Gets the access control policy for a Table or Backup
+ r"""Gets the access control policy for a Bigtable
resource. Returns an empty policy if the resource exists
but does not have a policy set.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_get_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
The request object. Request message for ``GetIamPolicy`` method.
@@ -3658,19 +4474,19 @@ def get_iam_policy(
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -3729,9 +4545,36 @@ def set_iam_policy(
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> policy_pb2.Policy:
- r"""Sets the access control policy on a Table or Backup
+ r"""Sets the access control policy on a Bigtable
resource. Replaces any existing policy.
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_set_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
The request object. Request message for ``SetIamPolicy`` method.
@@ -3771,19 +4614,19 @@ def set_iam_policy(
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
- documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
- :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
+ :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
- documentation](\ https://cloud.google.com/iam/docs/).
+ documentation](https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
@@ -3844,7 +4687,35 @@ def test_iam_permissions(
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that the caller has on the
- specified Table or Backup resource.
+ specified Bigtable resource.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+ from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+ def sample_test_iam_permissions():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
Args:
request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
@@ -3928,7 +4799,641 @@ def test_iam_permissions(
# Done; return the response.
return response
- def __enter__(self) -> "BigtableTableAdminClient":
+ def create_schema_bundle(
+ self,
+ request: Optional[
+ Union[bigtable_table_admin.CreateSchemaBundleRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ schema_bundle_id: Optional[str] = None,
+ schema_bundle: Optional[table.SchemaBundle] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation.Operation:
+ r"""Creates a new schema bundle in the specified table.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_create_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ schema_bundle = bigtable_admin_v2.SchemaBundle()
+ schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
+
+ request = bigtable_admin_v2.CreateSchemaBundleRequest(
+ parent="parent_value",
+ schema_bundle_id="schema_bundle_id_value",
+ schema_bundle=schema_bundle,
+ )
+
+ # Make the request
+ operation = client.create_schema_bundle(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
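+            The same call can also be made with the flattened arguments accepted by this
+            method instead of a request object (a sketch reusing the placeholder values
+            from the sample above):
+
+            .. code-block:: python
+
+                operation = client.create_schema_bundle(
+                    parent="parent_value",
+                    schema_bundle_id="schema_bundle_id_value",
+                    schema_bundle=schema_bundle,
+                )
+                response = operation.result()
+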
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest, dict]):
+ The request object. The request for
+ [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle].
+ parent (str):
+ Required. The parent resource where this schema bundle
+ will be created. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ schema_bundle_id (str):
+ Required. The unique ID to use for
+ the schema bundle, which will become the
+ final component of the schema bundle's
+ resource name.
+
+ This corresponds to the ``schema_bundle_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle):
+ Required. The schema bundle to
+ create.
+
+ This corresponds to the ``schema_bundle`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`
+ A named collection of related schemas.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, schema_bundle_id, schema_bundle]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.CreateSchemaBundleRequest):
+ request = bigtable_table_admin.CreateSchemaBundleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if schema_bundle_id is not None:
+ request.schema_bundle_id = schema_bundle_id
+ if schema_bundle is not None:
+ request.schema_bundle = schema_bundle
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_schema_bundle]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ table.SchemaBundle,
+ metadata_type=bigtable_table_admin.CreateSchemaBundleMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_schema_bundle(
+ self,
+ request: Optional[
+ Union[bigtable_table_admin.UpdateSchemaBundleRequest, dict]
+ ] = None,
+ *,
+ schema_bundle: Optional[table.SchemaBundle] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation.Operation:
+ r"""Updates a schema bundle in the specified table.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_update_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ schema_bundle = bigtable_admin_v2.SchemaBundle()
+ schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
+
+ request = bigtable_admin_v2.UpdateSchemaBundleRequest(
+ schema_bundle=schema_bundle,
+ )
+
+ # Make the request
+ operation = client.update_schema_bundle(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
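+            A sketch of the same call using the flattened arguments, restricting the
+            update to the proto schema via a field mask (the mask path is an illustrative
+            assumption):
+
+            .. code-block:: python
+
+                from google.protobuf import field_mask_pb2
+
+                operation = client.update_schema_bundle(
+                    schema_bundle=schema_bundle,
+                    update_mask=field_mask_pb2.FieldMask(paths=["proto_schema"]),
+                )
+                response = operation.result()
+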
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest, dict]):
+ The request object. The request for
+ [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle].
+ schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle):
+ Required. The schema bundle to update.
+
+ The schema bundle's ``name`` field is used to identify
+ the schema bundle to update. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+
+ This corresponds to the ``schema_bundle`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Optional. The list of fields to
+ update.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`
+ A named collection of related schemas.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [schema_bundle, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.UpdateSchemaBundleRequest):
+ request = bigtable_table_admin.UpdateSchemaBundleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if schema_bundle is not None:
+ request.schema_bundle = schema_bundle
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_schema_bundle]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("schema_bundle.name", request.schema_bundle.name),)
+ ),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ table.SchemaBundle,
+ metadata_type=bigtable_table_admin.UpdateSchemaBundleMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_schema_bundle(
+ self,
+ request: Optional[
+ Union[bigtable_table_admin.GetSchemaBundleRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> table.SchemaBundle:
+ r"""Gets metadata information about the specified schema
+ bundle.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_get_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetSchemaBundleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_schema_bundle(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest, dict]):
+ The request object. The request for
+ [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle].
+ name (str):
+ Required. The unique name of the schema bundle to
+ retrieve. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.SchemaBundle:
+ A named collection of related
+ schemas.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.GetSchemaBundleRequest):
+ request = bigtable_table_admin.GetSchemaBundleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_schema_bundle]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def list_schema_bundles(
+ self,
+ request: Optional[
+ Union[bigtable_table_admin.ListSchemaBundlesRequest, dict]
+ ] = None,
+ *,
+ parent: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> pagers.ListSchemaBundlesPager:
+ r"""Lists all schema bundles associated with the
+ specified table.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_list_schema_bundles():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListSchemaBundlesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_schema_bundles(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest, dict]):
+ The request object. The request for
+ [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles].
+ parent (str):
+ Required. The parent, which owns this collection of
+ schema bundles. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesPager:
+ The response for
+ [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.ListSchemaBundlesRequest):
+ request = bigtable_table_admin.ListSchemaBundlesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_schema_bundles]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListSchemaBundlesPager(
+ method=rpc,
+ request=request,
+ response=response,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_schema_bundle(
+ self,
+ request: Optional[
+ Union[bigtable_table_admin.DeleteSchemaBundleRequest, dict]
+ ] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> None:
+ r"""Deletes a schema bundle in the specified table.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_delete_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteSchemaBundleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_schema_bundle(request=request)
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest, dict]):
+ The request object. The request for
+ [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle].
+ name (str):
+ Required. The unique name of the schema bundle to
+ delete. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.DeleteSchemaBundleRequest):
+ request = bigtable_table_admin.DeleteSchemaBundleRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_schema_bundle]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ def __enter__(self) -> "BaseBigtableTableAdminClient":
return self
def __exit__(self, type, value, traceback):
@@ -3949,4 +5454,4 @@ def __exit__(self, type, value, traceback):
if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER
DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__
-__all__ = ("BigtableTableAdminClient",)
+__all__ = ("BaseBigtableTableAdminClient",)
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py
index 8b1ffba34..e6d83ba63 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py
@@ -667,3 +667,163 @@ async def async_generator():
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListSchemaBundlesPager:
+ """A pager for iterating through ``list_schema_bundles`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``schema_bundles`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListSchemaBundles`` requests and continue to iterate
+ through the ``schema_bundles`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
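+
+    A minimal usage sketch (assuming ``client`` is a table admin client, whose
+    ``list_schema_bundles`` method returns this pager; the parent value is a
+    placeholder):
+
+    .. code-block:: python
+
+        pager = client.list_schema_bundles(parent="parent_value")
+        for schema_bundle in pager:  # additional pages are fetched transparently
+            print(schema_bundle.name)
+
+        for page in client.list_schema_bundles(parent="parent_value").pages:
+            print(len(page.schema_bundles))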
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., bigtable_table_admin.ListSchemaBundlesResponse],
+ request: bigtable_table_admin.ListSchemaBundlesRequest,
+ response: bigtable_table_admin.ListSchemaBundlesResponse,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest):
+ The initial request object.
+ response (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse):
+ The initial response object.
+ retry (google.api_core.retry.Retry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = bigtable_table_admin.ListSchemaBundlesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterator[bigtable_table_admin.ListSchemaBundlesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __iter__(self) -> Iterator[table.SchemaBundle]:
+ for page in self.pages:
+ yield from page.schema_bundles
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListSchemaBundlesAsyncPager:
+ """A pager for iterating through ``list_schema_bundles`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``schema_bundles`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListSchemaBundles`` requests and continue to iterate
+ through the ``schema_bundles`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
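+
+    A minimal usage sketch (assuming ``client`` is the async table admin client,
+    whose ``list_schema_bundles`` method returns this pager; the parent value is a
+    placeholder):
+
+    .. code-block:: python
+
+        pager = await client.list_schema_bundles(parent="parent_value")
+        async for schema_bundle in pager:  # additional pages are fetched transparently
+            print(schema_bundle.name)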
+ """
+
+ def __init__(
+ self,
+ method: Callable[
+ ..., Awaitable[bigtable_table_admin.ListSchemaBundlesResponse]
+ ],
+ request: bigtable_table_admin.ListSchemaBundlesRequest,
+ response: bigtable_table_admin.ListSchemaBundlesResponse,
+ *,
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
+ ):
+ """Instantiates the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest):
+ The initial request object.
+ response (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse):
+ The initial response object.
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
+ if any, should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+ self._method = method
+ self._request = bigtable_table_admin.ListSchemaBundlesRequest(request)
+ self._response = response
+ self._retry = retry
+ self._timeout = timeout
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(
+ self,
+ ) -> AsyncIterator[bigtable_table_admin.ListSchemaBundlesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(
+ self._request,
+ retry=self._retry,
+ timeout=self._timeout,
+ metadata=self._metadata,
+ )
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterator[table.SchemaBundle]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.schema_bundles:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
index 9a549b7ca..8ad08df3f 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
@@ -81,9 +81,10 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
@@ -397,6 +398,31 @@ def _prep_wrapped_messages(self, client_info):
default_timeout=60.0,
client_info=client_info,
),
+ self.create_schema_bundle: gapic_v1.method.wrap_method(
+ self.create_schema_bundle,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.update_schema_bundle: gapic_v1.method.wrap_method(
+ self.update_schema_bundle,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_schema_bundle: gapic_v1.method.wrap_method(
+ self.get_schema_bundle,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_schema_bundles: gapic_v1.method.wrap_method(
+ self.list_schema_bundles,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_schema_bundle: gapic_v1.method.wrap_method(
+ self.delete_schema_bundle,
+ default_timeout=None,
+ client_info=client_info,
+ ),
}
def close(self):
@@ -704,6 +730,54 @@ def test_iam_permissions(
]:
raise NotImplementedError()
+ @property
+ def create_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.CreateSchemaBundleRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def update_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.UpdateSchemaBundleRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.GetSchemaBundleRequest],
+ Union[table.SchemaBundle, Awaitable[table.SchemaBundle]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_schema_bundles(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.ListSchemaBundlesRequest],
+ Union[
+ bigtable_table_admin.ListSchemaBundlesResponse,
+ Awaitable[bigtable_table_admin.ListSchemaBundlesResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.DeleteSchemaBundleRequest],
+ Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
+ ]:
+ raise NotImplementedError()
+
@property
def kind(self) -> str:
raise NotImplementedError()
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
index b18f13133..f8d1058c8 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
@@ -162,9 +162,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
             scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if a ``channel`` instance is provided.
channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
@@ -298,9 +299,10 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
             scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -1166,7 +1168,7 @@ def get_iam_policy(
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
- Gets the access control policy for a Table or Backup
+ Gets the access control policy for a Bigtable
resource. Returns an empty policy if the resource exists
but does not have a policy set.
@@ -1194,7 +1196,7 @@ def set_iam_policy(
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
- Sets the access control policy on a Table or Backup
+ Sets the access control policy on a Bigtable
resource. Replaces any existing policy.
Returns:
@@ -1225,7 +1227,7 @@ def test_iam_permissions(
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that the caller has on the
- specified Table or Backup resource.
+ specified Bigtable resource.
Returns:
Callable[[~.TestIamPermissionsRequest],
@@ -1245,6 +1247,145 @@ def test_iam_permissions(
)
return self._stubs["test_iam_permissions"]
+ @property
+ def create_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.CreateSchemaBundleRequest], operations_pb2.Operation
+ ]:
+ r"""Return a callable for the create schema bundle method over gRPC.
+
+ Creates a new schema bundle in the specified table.
+
+ Returns:
+ Callable[[~.CreateSchemaBundleRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_schema_bundle" not in self._stubs:
+ self._stubs["create_schema_bundle"] = self._logged_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/CreateSchemaBundle",
+ request_serializer=bigtable_table_admin.CreateSchemaBundleRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["create_schema_bundle"]
+
+ @property
+ def update_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.UpdateSchemaBundleRequest], operations_pb2.Operation
+ ]:
+ r"""Return a callable for the update schema bundle method over gRPC.
+
+ Updates a schema bundle in the specified table.
+
+ Returns:
+ Callable[[~.UpdateSchemaBundleRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_schema_bundle" not in self._stubs:
+ self._stubs["update_schema_bundle"] = self._logged_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateSchemaBundle",
+ request_serializer=bigtable_table_admin.UpdateSchemaBundleRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["update_schema_bundle"]
+
+ @property
+ def get_schema_bundle(
+ self,
+ ) -> Callable[[bigtable_table_admin.GetSchemaBundleRequest], table.SchemaBundle]:
+ r"""Return a callable for the get schema bundle method over gRPC.
+
+ Gets metadata information about the specified schema
+ bundle.
+
+ Returns:
+ Callable[[~.GetSchemaBundleRequest],
+ ~.SchemaBundle]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_schema_bundle" not in self._stubs:
+ self._stubs["get_schema_bundle"] = self._logged_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/GetSchemaBundle",
+ request_serializer=bigtable_table_admin.GetSchemaBundleRequest.serialize,
+ response_deserializer=table.SchemaBundle.deserialize,
+ )
+ return self._stubs["get_schema_bundle"]
+
+ @property
+ def list_schema_bundles(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.ListSchemaBundlesRequest],
+ bigtable_table_admin.ListSchemaBundlesResponse,
+ ]:
+ r"""Return a callable for the list schema bundles method over gRPC.
+
+ Lists all schema bundles associated with the
+ specified table.
+
+ Returns:
+ Callable[[~.ListSchemaBundlesRequest],
+ ~.ListSchemaBundlesResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_schema_bundles" not in self._stubs:
+ self._stubs["list_schema_bundles"] = self._logged_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/ListSchemaBundles",
+ request_serializer=bigtable_table_admin.ListSchemaBundlesRequest.serialize,
+ response_deserializer=bigtable_table_admin.ListSchemaBundlesResponse.deserialize,
+ )
+ return self._stubs["list_schema_bundles"]
+
+ @property
+ def delete_schema_bundle(
+ self,
+ ) -> Callable[[bigtable_table_admin.DeleteSchemaBundleRequest], empty_pb2.Empty]:
+ r"""Return a callable for the delete schema bundle method over gRPC.
+
+ Deletes a schema bundle in the specified table.
+
+ Returns:
+ Callable[[~.DeleteSchemaBundleRequest],
+ ~.Empty]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_schema_bundle" not in self._stubs:
+ self._stubs["delete_schema_bundle"] = self._logged_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSchemaBundle",
+ request_serializer=bigtable_table_admin.DeleteSchemaBundleRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs["delete_schema_bundle"]
+
def close(self):
self._logged_channel.close()
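The schema-bundle properties above all follow the same lazy stub-caching pattern that the inline comments describe; a stripped-down sketch of that pattern under assumed names (a bare grpc channel, no serializers), not this transport's actual wiring:

    import grpc

    class _LazyStubTransport:
        """Creates each gRPC callable on first property access and caches it."""

        def __init__(self, channel: grpc.Channel):
            self._channel = channel
            self._stubs: dict = {}

        @property
        def get_schema_bundle(self):
            if "get_schema_bundle" not in self._stubs:
                # unary_unary builds a callable for a single-request/single-response RPC;
                # request/response (de)serializers are omitted here for brevity.
                self._stubs["get_schema_bundle"] = self._channel.unary_unary(
                    "/google.bigtable.admin.v2.BigtableTableAdmin/GetSchemaBundle",
                )
            return self._stubs["get_schema_bundle"]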
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
index 8b08cbe8c..5017f17d0 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
@@ -159,8 +159,9 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be
+ removed in the next major version of this library.
             scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -211,9 +212,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
             scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -1199,7 +1201,7 @@ def get_iam_policy(
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the get iam policy method over gRPC.
- Gets the access control policy for a Table or Backup
+ Gets the access control policy for a Bigtable
resource. Returns an empty policy if the resource exists
but does not have a policy set.
@@ -1227,7 +1229,7 @@ def set_iam_policy(
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the set iam policy method over gRPC.
- Sets the access control policy on a Table or Backup
+ Sets the access control policy on a Bigtable
resource. Replaces any existing policy.
Returns:
@@ -1258,7 +1260,7 @@ def test_iam_permissions(
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that the caller has on the
- specified Table or Backup resource.
+ specified Bigtable resource.
Returns:
Callable[[~.TestIamPermissionsRequest],
@@ -1278,6 +1280,151 @@ def test_iam_permissions(
)
return self._stubs["test_iam_permissions"]
+ @property
+ def create_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.CreateSchemaBundleRequest],
+ Awaitable[operations_pb2.Operation],
+ ]:
+ r"""Return a callable for the create schema bundle method over gRPC.
+
+ Creates a new schema bundle in the specified table.
+
+ Returns:
+ Callable[[~.CreateSchemaBundleRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_schema_bundle" not in self._stubs:
+ self._stubs["create_schema_bundle"] = self._logged_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/CreateSchemaBundle",
+ request_serializer=bigtable_table_admin.CreateSchemaBundleRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["create_schema_bundle"]
+
+ @property
+ def update_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.UpdateSchemaBundleRequest],
+ Awaitable[operations_pb2.Operation],
+ ]:
+ r"""Return a callable for the update schema bundle method over gRPC.
+
+ Updates a schema bundle in the specified table.
+
+ Returns:
+ Callable[[~.UpdateSchemaBundleRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_schema_bundle" not in self._stubs:
+ self._stubs["update_schema_bundle"] = self._logged_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateSchemaBundle",
+ request_serializer=bigtable_table_admin.UpdateSchemaBundleRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["update_schema_bundle"]
+
+ @property
+ def get_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.GetSchemaBundleRequest], Awaitable[table.SchemaBundle]
+ ]:
+ r"""Return a callable for the get schema bundle method over gRPC.
+
+ Gets metadata information about the specified schema
+ bundle.
+
+ Returns:
+ Callable[[~.GetSchemaBundleRequest],
+ Awaitable[~.SchemaBundle]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_schema_bundle" not in self._stubs:
+ self._stubs["get_schema_bundle"] = self._logged_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/GetSchemaBundle",
+ request_serializer=bigtable_table_admin.GetSchemaBundleRequest.serialize,
+ response_deserializer=table.SchemaBundle.deserialize,
+ )
+ return self._stubs["get_schema_bundle"]
+
+ @property
+ def list_schema_bundles(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.ListSchemaBundlesRequest],
+ Awaitable[bigtable_table_admin.ListSchemaBundlesResponse],
+ ]:
+ r"""Return a callable for the list schema bundles method over gRPC.
+
+ Lists all schema bundles associated with the
+ specified table.
+
+ Returns:
+ Callable[[~.ListSchemaBundlesRequest],
+ Awaitable[~.ListSchemaBundlesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_schema_bundles" not in self._stubs:
+ self._stubs["list_schema_bundles"] = self._logged_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/ListSchemaBundles",
+ request_serializer=bigtable_table_admin.ListSchemaBundlesRequest.serialize,
+ response_deserializer=bigtable_table_admin.ListSchemaBundlesResponse.deserialize,
+ )
+ return self._stubs["list_schema_bundles"]
+
+ @property
+ def delete_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.DeleteSchemaBundleRequest], Awaitable[empty_pb2.Empty]
+ ]:
+ r"""Return a callable for the delete schema bundle method over gRPC.
+
+ Deletes a schema bundle in the specified table.
+
+ Returns:
+ Callable[[~.DeleteSchemaBundleRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_schema_bundle" not in self._stubs:
+ self._stubs["delete_schema_bundle"] = self._logged_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSchemaBundle",
+ request_serializer=bigtable_table_admin.DeleteSchemaBundleRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs["delete_schema_bundle"]
+
def _prep_wrapped_messages(self, client_info):
"""Precompute the wrapped methods, overriding the base class method to use async wrappers."""
self._wrapped_methods = {
@@ -1531,6 +1678,31 @@ def _prep_wrapped_messages(self, client_info):
default_timeout=60.0,
client_info=client_info,
),
+ self.create_schema_bundle: self._wrap_method(
+ self.create_schema_bundle,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.update_schema_bundle: self._wrap_method(
+ self.update_schema_bundle,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_schema_bundle: self._wrap_method(
+ self.get_schema_bundle,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_schema_bundles: self._wrap_method(
+ self.list_schema_bundles,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_schema_bundle: self._wrap_method(
+ self.delete_schema_bundle,
+ default_timeout=None,
+ client_info=client_info,
+ ),
}
def _wrap_method(self, func, *args, **kwargs):
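Because each of the schema-bundle methods above is wrapped with default_timeout=None, no deadline or retry policy applies unless the caller supplies one. A hedged sketch of passing them explicitly (the client object, the flattened name argument, and the resource path are assumptions, not taken from this diff):

    from google.api_core import retry as retries

    bundle = client.get_schema_bundle(
        name="projects/my-project/instances/my-instance/tables/my-table/schemaBundles/my-bundle",
        retry=retries.Retry(initial=1.0, maximum=10.0),  # exponential backoff between attempts
        timeout=30.0,  # per-call deadline in seconds
    )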
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py
index fd9445161..6c3815f79 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py
@@ -117,6 +117,14 @@ def post_create_backup(self, response):
logging.log(f"Received response: {response}")
return response
+ def pre_create_schema_bundle(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_create_schema_bundle(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
def pre_create_table(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
@@ -141,6 +149,10 @@ def pre_delete_backup(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
+ def pre_delete_schema_bundle(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
def pre_delete_snapshot(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
@@ -185,6 +197,14 @@ def post_get_iam_policy(self, response):
logging.log(f"Received response: {response}")
return response
+ def pre_get_schema_bundle(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_get_schema_bundle(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
def pre_get_snapshot(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
@@ -217,6 +237,14 @@ def post_list_backups(self, response):
logging.log(f"Received response: {response}")
return response
+ def pre_list_schema_bundles(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_list_schema_bundles(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
def pre_list_snapshots(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
@@ -297,6 +325,14 @@ def post_update_backup(self, response):
logging.log(f"Received response: {response}")
return response
+ def pre_update_schema_bundle(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_update_schema_bundle(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
def pre_update_table(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
@@ -306,7 +342,7 @@ def post_update_table(self, response):
return response
transport = BigtableTableAdminRestTransport(interceptor=MyCustomBigtableTableAdminInterceptor())
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
"""
@@ -509,6 +545,55 @@ def post_create_backup_with_metadata(
"""
return response, metadata
+ def pre_create_schema_bundle(
+ self,
+ request: bigtable_table_admin.CreateSchemaBundleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ bigtable_table_admin.CreateSchemaBundleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for create_schema_bundle
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the BigtableTableAdmin server.
+ """
+ return request, metadata
+
+ def post_create_schema_bundle(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for create_schema_bundle
+
+ DEPRECATED. Please use the `post_create_schema_bundle_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the BigtableTableAdmin server but before
+ it is returned to user code. This `post_create_schema_bundle` interceptor runs
+ before the `post_create_schema_bundle_with_metadata` interceptor.
+ """
+ return response
+
+ def post_create_schema_bundle_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for create_schema_bundle
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the BigtableTableAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_create_schema_bundle_with_metadata`
+ interceptor in new development instead of the `post_create_schema_bundle` interceptor.
+ When both interceptors are used, this `post_create_schema_bundle_with_metadata` interceptor runs after the
+ `post_create_schema_bundle` interceptor. The (possibly modified) response returned by
+ `post_create_schema_bundle` will be passed to
+ `post_create_schema_bundle_with_metadata`.
+ """
+ return response, metadata
+
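As the docstrings above note, these hooks are intended to be overridden in a subclass. A minimal sketch of a custom interceptor for the new schema-bundle RPCs (the interceptor base-class name, the extra metadata key, and the transport wiring are assumptions for illustration):

    class AuditingInterceptor(BigtableTableAdminRestInterceptor):
        def pre_create_schema_bundle(self, request, metadata):
            # Attach an extra metadata entry before the request reaches the server.
            return request, tuple(metadata) + (("x-audit-source", "schema-tool"),)

        def post_create_schema_bundle_with_metadata(self, response, metadata):
            # Inspect the long-running operation after the server responds.
            print(f"CreateSchemaBundle started operation: {response.name}")
            return response, metadata

    # transport = BigtableTableAdminRestTransport(interceptor=AuditingInterceptor())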
def pre_create_table(
self,
request: bigtable_table_admin.CreateTableRequest,
@@ -634,6 +719,21 @@ def pre_delete_backup(
"""
return request, metadata
+ def pre_delete_schema_bundle(
+ self,
+ request: bigtable_table_admin.DeleteSchemaBundleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ bigtable_table_admin.DeleteSchemaBundleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for delete_schema_bundle
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the BigtableTableAdmin server.
+ """
+ return request, metadata
+
def pre_delete_snapshot(
self,
request: bigtable_table_admin.DeleteSnapshotRequest,
@@ -869,6 +969,55 @@ def post_get_iam_policy_with_metadata(
"""
return response, metadata
+ def pre_get_schema_bundle(
+ self,
+ request: bigtable_table_admin.GetSchemaBundleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ bigtable_table_admin.GetSchemaBundleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for get_schema_bundle
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the BigtableTableAdmin server.
+ """
+ return request, metadata
+
+ def post_get_schema_bundle(
+ self, response: table.SchemaBundle
+ ) -> table.SchemaBundle:
+ """Post-rpc interceptor for get_schema_bundle
+
+ DEPRECATED. Please use the `post_get_schema_bundle_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the BigtableTableAdmin server but before
+ it is returned to user code. This `post_get_schema_bundle` interceptor runs
+ before the `post_get_schema_bundle_with_metadata` interceptor.
+ """
+ return response
+
+ def post_get_schema_bundle_with_metadata(
+ self,
+ response: table.SchemaBundle,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[table.SchemaBundle, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for get_schema_bundle
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the BigtableTableAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_get_schema_bundle_with_metadata`
+ interceptor in new development instead of the `post_get_schema_bundle` interceptor.
+ When both interceptors are used, this `post_get_schema_bundle_with_metadata` interceptor runs after the
+ `post_get_schema_bundle` interceptor. The (possibly modified) response returned by
+ `post_get_schema_bundle` will be passed to
+ `post_get_schema_bundle_with_metadata`.
+ """
+ return response, metadata
+
def pre_get_snapshot(
self,
request: bigtable_table_admin.GetSnapshotRequest,
@@ -1062,6 +1211,58 @@ def post_list_backups_with_metadata(
"""
return response, metadata
+ def pre_list_schema_bundles(
+ self,
+ request: bigtable_table_admin.ListSchemaBundlesRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ bigtable_table_admin.ListSchemaBundlesRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for list_schema_bundles
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the BigtableTableAdmin server.
+ """
+ return request, metadata
+
+ def post_list_schema_bundles(
+ self, response: bigtable_table_admin.ListSchemaBundlesResponse
+ ) -> bigtable_table_admin.ListSchemaBundlesResponse:
+ """Post-rpc interceptor for list_schema_bundles
+
+ DEPRECATED. Please use the `post_list_schema_bundles_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the BigtableTableAdmin server but before
+ it is returned to user code. This `post_list_schema_bundles` interceptor runs
+ before the `post_list_schema_bundles_with_metadata` interceptor.
+ """
+ return response
+
+ def post_list_schema_bundles_with_metadata(
+ self,
+ response: bigtable_table_admin.ListSchemaBundlesResponse,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ bigtable_table_admin.ListSchemaBundlesResponse,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Post-rpc interceptor for list_schema_bundles
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the BigtableTableAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_list_schema_bundles_with_metadata`
+ interceptor in new development instead of the `post_list_schema_bundles` interceptor.
+ When both interceptors are used, this `post_list_schema_bundles_with_metadata` interceptor runs after the
+ `post_list_schema_bundles` interceptor. The (possibly modified) response returned by
+ `post_list_schema_bundles` will be passed to
+ `post_list_schema_bundles_with_metadata`.
+ """
+ return response, metadata
+
def pre_list_snapshots(
self,
request: bigtable_table_admin.ListSnapshotsRequest,
@@ -1548,6 +1749,55 @@ def post_update_backup_with_metadata(
"""
return response, metadata
+ def pre_update_schema_bundle(
+ self,
+ request: bigtable_table_admin.UpdateSchemaBundleRequest,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[
+ bigtable_table_admin.UpdateSchemaBundleRequest,
+ Sequence[Tuple[str, Union[str, bytes]]],
+ ]:
+ """Pre-rpc interceptor for update_schema_bundle
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the BigtableTableAdmin server.
+ """
+ return request, metadata
+
+ def post_update_schema_bundle(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for update_schema_bundle
+
+ DEPRECATED. Please use the `post_update_schema_bundle_with_metadata`
+ interceptor instead.
+
+ Override in a subclass to read or manipulate the response
+ after it is returned by the BigtableTableAdmin server but before
+ it is returned to user code. This `post_update_schema_bundle` interceptor runs
+ before the `post_update_schema_bundle_with_metadata` interceptor.
+ """
+ return response
+
+ def post_update_schema_bundle_with_metadata(
+ self,
+ response: operations_pb2.Operation,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
+ ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]:
+ """Post-rpc interceptor for update_schema_bundle
+
+ Override in a subclass to read or manipulate the response or metadata after it
+ is returned by the BigtableTableAdmin server but before it is returned to user code.
+
+ We recommend only using this `post_update_schema_bundle_with_metadata`
+ interceptor in new development instead of the `post_update_schema_bundle` interceptor.
+ When both interceptors are used, this `post_update_schema_bundle_with_metadata` interceptor runs after the
+ `post_update_schema_bundle` interceptor. The (possibly modified) response returned by
+ `post_update_schema_bundle` will be passed to
+ `post_update_schema_bundle_with_metadata`.
+ """
+ return response, metadata
+
def pre_update_table(
self,
request: bigtable_table_admin.UpdateTableRequest,
@@ -1646,9 +1896,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if ``channel`` is provided. This argument will be
+ removed in the next major version of this library.
             scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
@@ -1837,7 +2088,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CheckConsistency",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CheckConsistency",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "CheckConsistency",
@@ -1888,7 +2139,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.check_consistency",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.check_consistency",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "CheckConsistency",
@@ -1993,7 +2244,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CopyBackup",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CopyBackup",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "CopyBackup",
@@ -2040,7 +2291,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.copy_backup",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.copy_backup",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "CopyBackup",
@@ -2148,7 +2399,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateAuthorizedView",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateAuthorizedView",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "CreateAuthorizedView",
@@ -2197,7 +2448,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_authorized_view",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_authorized_view",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "CreateAuthorizedView",
@@ -2303,7 +2554,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateBackup",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateBackup",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "CreateBackup",
@@ -2350,7 +2601,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_backup",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_backup",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "CreateBackup",
@@ -2360,12 +2611,12 @@ def __call__(
)
return resp
- class _CreateTable(
- _BaseBigtableTableAdminRestTransport._BaseCreateTable,
+ class _CreateSchemaBundle(
+ _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle,
BigtableTableAdminRestStub,
):
def __hash__(self):
- return hash("BigtableTableAdminRestTransport.CreateTable")
+ return hash("BigtableTableAdminRestTransport.CreateSchemaBundle")
@staticmethod
def _get_response(
@@ -2392,18 +2643,18 @@ def _get_response(
def __call__(
self,
- request: bigtable_table_admin.CreateTableRequest,
+ request: bigtable_table_admin.CreateSchemaBundleRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
- ) -> gba_table.Table:
- r"""Call the create table method over HTTP.
+ ) -> operations_pb2.Operation:
+ r"""Call the create schema bundle method over HTTP.
Args:
- request (~.bigtable_table_admin.CreateTableRequest):
- The request object. Request message for
- [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable]
+ request (~.bigtable_table_admin.CreateSchemaBundleRequest):
+ The request object. The request for
+ [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -2413,29 +2664,30 @@ def __call__(
be of type `bytes`.
Returns:
- ~.gba_table.Table:
- A collection of user data indexed by
- row, column, and timestamp. Each table
- is served using the resources of its
- parent cluster.
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
"""
http_options = (
- _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_http_options()
+ _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_http_options()
)
- request, metadata = self._interceptor.pre_create_table(request, metadata)
- transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_transcoded_request(
+ request, metadata = self._interceptor.pre_create_schema_bundle(
+ request, metadata
+ )
+ transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_transcoded_request(
http_options, request
)
- body = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_request_body_json(
+ body = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_request_body_json(
transcoded_request
)
# Jsonify the query params
- query_params = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_query_params_json(
+ query_params = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_query_params_json(
transcoded_request
)
@@ -2447,7 +2699,7 @@ def __call__(
)
method = transcoded_request["method"]
try:
- request_payload = type(request).to_json(request)
+ request_payload = json_format.MessageToJson(request)
except:
request_payload = None
http_request = {
@@ -2457,24 +2709,26 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateTable",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateSchemaBundle",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
- "rpcName": "CreateTable",
+ "rpcName": "CreateSchemaBundle",
"httpRequest": http_request,
"metadata": http_request["headers"],
},
)
# Send the request
- response = BigtableTableAdminRestTransport._CreateTable._get_response(
- self._host,
- metadata,
- query_params,
- self._session,
- timeout,
- transcoded_request,
- body,
+ response = (
+ BigtableTableAdminRestTransport._CreateSchemaBundle._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
@@ -2483,21 +2737,19 @@ def __call__(
raise core_exceptions.from_http_response(response)
# Return the response
- resp = gba_table.Table()
- pb_resp = gba_table.Table.pb(resp)
-
- json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
- resp = self._interceptor.post_create_table(resp)
+ resp = self._interceptor.post_create_schema_bundle(resp)
response_metadata = [(k, str(v)) for k, v in response.headers.items()]
- resp, _ = self._interceptor.post_create_table_with_metadata(
+ resp, _ = self._interceptor.post_create_schema_bundle_with_metadata(
resp, response_metadata
)
if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
logging.DEBUG
): # pragma: NO COVER
try:
- response_payload = gba_table.Table.to_json(response)
+ response_payload = json_format.MessageToJson(resp)
except:
response_payload = None
http_response = {
@@ -2506,22 +2758,22 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_table",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_schema_bundle",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
- "rpcName": "CreateTable",
+ "rpcName": "CreateSchemaBundle",
"metadata": http_response["headers"],
"httpResponse": http_response,
},
)
return resp
- class _CreateTableFromSnapshot(
- _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot,
+ class _CreateTable(
+ _BaseBigtableTableAdminRestTransport._BaseCreateTable,
BigtableTableAdminRestStub,
):
def __hash__(self):
- return hash("BigtableTableAdminRestTransport.CreateTableFromSnapshot")
+ return hash("BigtableTableAdminRestTransport.CreateTable")
@staticmethod
def _get_response(
@@ -2548,29 +2800,185 @@ def _get_response(
def __call__(
self,
- request: bigtable_table_admin.CreateTableFromSnapshotRequest,
+ request: bigtable_table_admin.CreateTableRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
- ) -> operations_pb2.Operation:
- r"""Call the create table from
- snapshot method over HTTP.
+ ) -> gba_table.Table:
+ r"""Call the create table method over HTTP.
- Args:
- request (~.bigtable_table_admin.CreateTableFromSnapshotRequest):
- The request object. Request message for
- [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot]
+ Args:
+ request (~.bigtable_table_admin.CreateTableRequest):
+ The request object. Request message for
+ [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable]
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
- Note: This is a private alpha release of Cloud Bigtable
- snapshots. This feature is not currently available to
- most Cloud Bigtable customers. This feature might be
- changed in backward-incompatible ways and is not
- recommended for production use. It is not subject to any
- SLA or deprecation policy.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
- should be retried.
- timeout (float): The timeout for this request.
+ Returns:
+ ~.gba_table.Table:
+ A collection of user data indexed by
+ row, column, and timestamp. Each table
+ is served using the resources of its
+ parent cluster.
+
+ """
+
+ http_options = (
+ _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_create_table(request, metadata)
+ transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateTable",
+ extra={
+ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "rpcName": "CreateTable",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = BigtableTableAdminRestTransport._CreateTable._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = gba_table.Table()
+ pb_resp = gba_table.Table.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_create_table(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_create_table_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = gba_table.Table.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_table",
+ extra={
+ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "rpcName": "CreateTable",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
+ class _CreateTableFromSnapshot(
+ _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot,
+ BigtableTableAdminRestStub,
+ ):
+ def __hash__(self):
+ return hash("BigtableTableAdminRestTransport.CreateTableFromSnapshot")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: bigtable_table_admin.CreateTableFromSnapshotRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the create table from
+ snapshot method over HTTP.
+
+ Args:
+ request (~.bigtable_table_admin.CreateTableFromSnapshotRequest):
+ The request object. Request message for
+ [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot]
+
+ Note: This is a private alpha release of Cloud Bigtable
+ snapshots. This feature is not currently available to
+ most Cloud Bigtable customers. This feature might be
+ changed in backward-incompatible ways and is not
+ recommended for production use. It is not subject to any
+ SLA or deprecation policy.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
sent along with the request as metadata. Normally, each value must be of type `str`,
but for metadata keys ending with the suffix `-bin`, the corresponding values must
@@ -2622,7 +3030,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateTableFromSnapshot",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateTableFromSnapshot",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "CreateTableFromSnapshot",
@@ -2671,7 +3079,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_table_from_snapshot",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_table_from_snapshot",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "CreateTableFromSnapshot",
@@ -2767,7 +3175,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteAuthorizedView",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteAuthorizedView",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "DeleteAuthorizedView",
@@ -2877,7 +3285,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteBackup",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteBackup",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "DeleteBackup",
@@ -2901,6 +3309,118 @@ def __call__(
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
+ class _DeleteSchemaBundle(
+ _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle,
+ BigtableTableAdminRestStub,
+ ):
+ def __hash__(self):
+ return hash("BigtableTableAdminRestTransport.DeleteSchemaBundle")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: bigtable_table_admin.DeleteSchemaBundleRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ):
+ r"""Call the delete schema bundle method over HTTP.
+
+ Args:
+ request (~.bigtable_table_admin.DeleteSchemaBundleRequest):
+ The request object. The request for
+ [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+ """
+
+ http_options = (
+ _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_delete_schema_bundle(
+ request, metadata
+ )
+ transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteSchemaBundle",
+ extra={
+ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "rpcName": "DeleteSchemaBundle",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = (
+ BigtableTableAdminRestTransport._DeleteSchemaBundle._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
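The delete call above returns no body; failures surface as google.api_core exceptions raised via core_exceptions.from_http_response. A hedged usage sketch (the synchronous client object, the flattened name argument, and the resource path are placeholders, not part of this diff):

    from google.api_core import exceptions as core_exceptions

    try:
        client.delete_schema_bundle(
            name="projects/my-project/instances/my-instance/tables/my-table/schemaBundles/my-bundle"
        )
    except core_exceptions.NotFound:
        # The bundle was already gone; treat deletion as idempotent.
        pass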
class _DeleteSnapshot(
_BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot,
BigtableTableAdminRestStub,
@@ -2992,7 +3512,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteSnapshot",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteSnapshot",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "DeleteSnapshot",
@@ -3100,7 +3620,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteTable",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteTable",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "DeleteTable",
@@ -3213,7 +3733,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DropRowRange",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DropRowRange",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "DropRowRange",
@@ -3336,7 +3856,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GenerateConsistencyToken",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GenerateConsistencyToken",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GenerateConsistencyToken",
@@ -3391,7 +3911,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.generate_consistency_token",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.generate_consistency_token",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GenerateConsistencyToken",
@@ -3497,7 +4017,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetAuthorizedView",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetAuthorizedView",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GetAuthorizedView",
@@ -3545,7 +4065,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_authorized_view",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_authorized_view",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GetAuthorizedView",
@@ -3642,7 +4162,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetBackup",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetBackup",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GetBackup",
@@ -3690,7 +4210,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_backup",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_backup",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GetBackup",
@@ -3867,7 +4387,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetIamPolicy",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetIamPolicy",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GetIamPolicy",
@@ -3916,7 +4436,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_iam_policy",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_iam_policy",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GetIamPolicy",
@@ -3926,7 +4446,157 @@ def __call__(
)
return resp
- class _GetSnapshot(
+ class _GetSchemaBundle(
+ _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle,
+ BigtableTableAdminRestStub,
+ ):
+ def __hash__(self):
+ return hash("BigtableTableAdminRestTransport.GetSchemaBundle")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: bigtable_table_admin.GetSchemaBundleRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> table.SchemaBundle:
+ r"""Call the get schema bundle method over HTTP.
+
+ Args:
+ request (~.bigtable_table_admin.GetSchemaBundleRequest):
+ The request object. The request for
+ [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.table.SchemaBundle:
+ A named collection of related
+ schemas.
+
+ """
+
+ http_options = (
+ _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_get_schema_bundle(
+ request, metadata
+ )
+ transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetSchemaBundle",
+ extra={
+ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "rpcName": "GetSchemaBundle",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = BigtableTableAdminRestTransport._GetSchemaBundle._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = table.SchemaBundle()
+ pb_resp = table.SchemaBundle.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_get_schema_bundle(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_get_schema_bundle_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = table.SchemaBundle.to_json(response)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_schema_bundle",
+ extra={
+ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "rpcName": "GetSchemaBundle",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
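The _GetSchemaBundle stub above follows the same shape as the neighbouring stubs: interceptor pre-hook, HTTP transcoding, a GET request, then the JSON body is parsed into table.SchemaBundle before the post-hooks run. A minimal usage sketch, assuming the admin client exposes this as get_schema_bundle and using placeholder resource names (the import path and method name are assumptions, not something this diff guarantees):

# Hedged sketch: client import path, the get_schema_bundle method, and all
# resource names below are assumptions / placeholders.
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
from google.cloud.bigtable_admin_v2.types import GetSchemaBundleRequest

# Uses application-default credentials; transport="rest" exercises the stub above.
client = BigtableTableAdminClient(transport="rest")

request = GetSchemaBundleRequest(
    # Placeholder name of the form
    # projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}
    name="projects/my-project/instances/my-instance/tables/my-table/schemaBundles/my-bundle",
)
schema_bundle = client.get_schema_bundle(request=request)
print(schema_bundle.name)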
+ class _GetSnapshot(
_BaseBigtableTableAdminRestTransport._BaseGetSnapshot,
BigtableTableAdminRestStub,
):
@@ -4034,7 +4704,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetSnapshot",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetSnapshot",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GetSnapshot",
@@ -4082,7 +4752,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_snapshot",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_snapshot",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GetSnapshot",
@@ -4183,7 +4853,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetTable",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetTable",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GetTable",
@@ -4231,7 +4901,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_table",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_table",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "GetTable",
@@ -4277,13 +4947,169 @@ def __call__(
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
- ) -> bigtable_table_admin.ListAuthorizedViewsResponse:
- r"""Call the list authorized views method over HTTP.
+ ) -> bigtable_table_admin.ListAuthorizedViewsResponse:
+ r"""Call the list authorized views method over HTTP.
+
+ Args:
+ request (~.bigtable_table_admin.ListAuthorizedViewsRequest):
+ The request object. Request message for
+ [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews]
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.bigtable_table_admin.ListAuthorizedViewsResponse:
+ Response message for
+ [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews]
+
+ """
+
+ http_options = (
+ _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_list_authorized_views(
+ request, metadata
+ )
+ transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_transcoded_request(
+ http_options, request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = type(request).to_json(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListAuthorizedViews",
+ extra={
+ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "rpcName": "ListAuthorizedViews",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = (
+ BigtableTableAdminRestTransport._ListAuthorizedViews._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ )
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = bigtable_table_admin.ListAuthorizedViewsResponse()
+ pb_resp = bigtable_table_admin.ListAuthorizedViewsResponse.pb(resp)
+
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_list_authorized_views(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_list_authorized_views_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = (
+ bigtable_table_admin.ListAuthorizedViewsResponse.to_json(
+ response
+ )
+ )
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_authorized_views",
+ extra={
+ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "rpcName": "ListAuthorizedViews",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
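Like the other list stubs, _ListAuthorizedViews (and the _ListSchemaBundles stub introduced further down) returns a single page of results; the generated client normally wraps these responses in a pager so callers can iterate across pages without touching next_page_token. A hedged sketch of the usual call site, with placeholder names:

# Hedged sketch: assumes the standard GAPIC pager behaviour on the client surface;
# resource names are placeholders.
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
from google.cloud.bigtable_admin_v2.types import ListAuthorizedViewsRequest

client = BigtableTableAdminClient(transport="rest")
request = ListAuthorizedViewsRequest(
    parent="projects/my-project/instances/my-instance/tables/my-table",
    page_size=100,
)
# The pager fetches follow-up pages via next_page_token as iteration proceeds.
for authorized_view in client.list_authorized_views(request=request):
    print(authorized_view.name)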
+ class _ListBackups(
+ _BaseBigtableTableAdminRestTransport._BaseListBackups,
+ BigtableTableAdminRestStub,
+ ):
+ def __hash__(self):
+ return hash("BigtableTableAdminRestTransport.ListBackups")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ )
+ return response
+
+ def __call__(
+ self,
+ request: bigtable_table_admin.ListBackupsRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> bigtable_table_admin.ListBackupsResponse:
+ r"""Call the list backups method over HTTP.
Args:
- request (~.bigtable_table_admin.ListAuthorizedViewsRequest):
- The request object. Request message for
- [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews]
+ request (~.bigtable_table_admin.ListBackupsRequest):
+ The request object. The request for
+ [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -4293,25 +5119,23 @@ def __call__(
be of type `bytes`.
Returns:
- ~.bigtable_table_admin.ListAuthorizedViewsResponse:
- Response message for
- [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews]
+ ~.bigtable_table_admin.ListBackupsResponse:
+ The response for
+ [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
"""
http_options = (
- _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_http_options()
+ _BaseBigtableTableAdminRestTransport._BaseListBackups._get_http_options()
)
- request, metadata = self._interceptor.pre_list_authorized_views(
- request, metadata
- )
- transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_transcoded_request(
+ request, metadata = self._interceptor.pre_list_backups(request, metadata)
+ transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_transcoded_request(
http_options, request
)
# Jsonify the query params
- query_params = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_query_params_json(
+ query_params = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_query_params_json(
transcoded_request
)
@@ -4333,25 +5157,23 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListAuthorizedViews",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListBackups",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
- "rpcName": "ListAuthorizedViews",
+ "rpcName": "ListBackups",
"httpRequest": http_request,
"metadata": http_request["headers"],
},
)
# Send the request
- response = (
- BigtableTableAdminRestTransport._ListAuthorizedViews._get_response(
- self._host,
- metadata,
- query_params,
- self._session,
- timeout,
- transcoded_request,
- )
+ response = BigtableTableAdminRestTransport._ListBackups._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
@@ -4360,24 +5182,22 @@ def __call__(
raise core_exceptions.from_http_response(response)
# Return the response
- resp = bigtable_table_admin.ListAuthorizedViewsResponse()
- pb_resp = bigtable_table_admin.ListAuthorizedViewsResponse.pb(resp)
+ resp = bigtable_table_admin.ListBackupsResponse()
+ pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
- resp = self._interceptor.post_list_authorized_views(resp)
+ resp = self._interceptor.post_list_backups(resp)
response_metadata = [(k, str(v)) for k, v in response.headers.items()]
- resp, _ = self._interceptor.post_list_authorized_views_with_metadata(
+ resp, _ = self._interceptor.post_list_backups_with_metadata(
resp, response_metadata
)
if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
logging.DEBUG
): # pragma: NO COVER
try:
- response_payload = (
- bigtable_table_admin.ListAuthorizedViewsResponse.to_json(
- response
- )
+ response_payload = bigtable_table_admin.ListBackupsResponse.to_json(
+ response
)
except:
response_payload = None
@@ -4387,22 +5207,22 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_authorized_views",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_backups",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
- "rpcName": "ListAuthorizedViews",
+ "rpcName": "ListBackups",
"metadata": http_response["headers"],
"httpResponse": http_response,
},
)
return resp
- class _ListBackups(
- _BaseBigtableTableAdminRestTransport._BaseListBackups,
+ class _ListSchemaBundles(
+ _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles,
BigtableTableAdminRestStub,
):
def __hash__(self):
- return hash("BigtableTableAdminRestTransport.ListBackups")
+ return hash("BigtableTableAdminRestTransport.ListSchemaBundles")
@staticmethod
def _get_response(
@@ -4428,18 +5248,18 @@ def _get_response(
def __call__(
self,
- request: bigtable_table_admin.ListBackupsRequest,
+ request: bigtable_table_admin.ListSchemaBundlesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
- ) -> bigtable_table_admin.ListBackupsResponse:
- r"""Call the list backups method over HTTP.
+ ) -> bigtable_table_admin.ListSchemaBundlesResponse:
+ r"""Call the list schema bundles method over HTTP.
Args:
- request (~.bigtable_table_admin.ListBackupsRequest):
+ request (~.bigtable_table_admin.ListSchemaBundlesRequest):
The request object. The request for
- [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
+ [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
@@ -4449,23 +5269,25 @@ def __call__(
be of type `bytes`.
Returns:
- ~.bigtable_table_admin.ListBackupsResponse:
+ ~.bigtable_table_admin.ListSchemaBundlesResponse:
The response for
- [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
+ [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles].
"""
http_options = (
- _BaseBigtableTableAdminRestTransport._BaseListBackups._get_http_options()
+ _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_http_options()
)
- request, metadata = self._interceptor.pre_list_backups(request, metadata)
- transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_transcoded_request(
+ request, metadata = self._interceptor.pre_list_schema_bundles(
+ request, metadata
+ )
+ transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_transcoded_request(
http_options, request
)
# Jsonify the query params
- query_params = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_query_params_json(
+ query_params = _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_query_params_json(
transcoded_request
)
@@ -4487,17 +5309,17 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListBackups",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListSchemaBundles",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
- "rpcName": "ListBackups",
+ "rpcName": "ListSchemaBundles",
"httpRequest": http_request,
"metadata": http_request["headers"],
},
)
# Send the request
- response = BigtableTableAdminRestTransport._ListBackups._get_response(
+ response = BigtableTableAdminRestTransport._ListSchemaBundles._get_response(
self._host,
metadata,
query_params,
@@ -4512,22 +5334,22 @@ def __call__(
raise core_exceptions.from_http_response(response)
# Return the response
- resp = bigtable_table_admin.ListBackupsResponse()
- pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp)
+ resp = bigtable_table_admin.ListSchemaBundlesResponse()
+ pb_resp = bigtable_table_admin.ListSchemaBundlesResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
- resp = self._interceptor.post_list_backups(resp)
+ resp = self._interceptor.post_list_schema_bundles(resp)
response_metadata = [(k, str(v)) for k, v in response.headers.items()]
- resp, _ = self._interceptor.post_list_backups_with_metadata(
+ resp, _ = self._interceptor.post_list_schema_bundles_with_metadata(
resp, response_metadata
)
if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
logging.DEBUG
): # pragma: NO COVER
try:
- response_payload = bigtable_table_admin.ListBackupsResponse.to_json(
- response
+ response_payload = (
+ bigtable_table_admin.ListSchemaBundlesResponse.to_json(response)
)
except:
response_payload = None
@@ -4537,10 +5359,10 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_backups",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_schema_bundles",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
- "rpcName": "ListBackups",
+ "rpcName": "ListSchemaBundles",
"metadata": http_response["headers"],
"httpResponse": http_response,
},
@@ -4651,7 +5473,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListSnapshots",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListSnapshots",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "ListSnapshots",
@@ -4701,7 +5523,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_snapshots",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_snapshots",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "ListSnapshots",
@@ -4800,7 +5622,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListTables",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListTables",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "ListTables",
@@ -4850,7 +5672,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_tables",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_tables",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "ListTables",
@@ -4959,7 +5781,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ModifyColumnFamilies",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ModifyColumnFamilies",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "ModifyColumnFamilies",
@@ -5010,7 +5832,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.modify_column_families",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.modify_column_families",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "ModifyColumnFamilies",
@@ -5116,7 +5938,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.RestoreTable",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.RestoreTable",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "RestoreTable",
@@ -5163,7 +5985,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.restore_table",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.restore_table",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "RestoreTable",
@@ -5340,7 +6162,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.SetIamPolicy",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.SetIamPolicy",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "SetIamPolicy",
@@ -5389,7 +6211,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.set_iam_policy",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.set_iam_policy",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "SetIamPolicy",
@@ -5502,7 +6324,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.SnapshotTable",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.SnapshotTable",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "SnapshotTable",
@@ -5549,7 +6371,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.snapshot_table",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.snapshot_table",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "SnapshotTable",
@@ -5653,7 +6475,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.TestIamPermissions",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.TestIamPermissions",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "TestIamPermissions",
@@ -5704,7 +6526,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.test_iam_permissions",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.test_iam_permissions",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "TestIamPermissions",
@@ -5810,7 +6632,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UndeleteTable",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UndeleteTable",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "UndeleteTable",
@@ -5857,7 +6679,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.undelete_table",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.undelete_table",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "UndeleteTable",
@@ -5965,7 +6787,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateAuthorizedView",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateAuthorizedView",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "UpdateAuthorizedView",
@@ -6014,7 +6836,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_authorized_view",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_authorized_view",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "UpdateAuthorizedView",
@@ -6117,7 +6939,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateBackup",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateBackup",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "UpdateBackup",
@@ -6166,7 +6988,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_backup",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_backup",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "UpdateBackup",
@@ -6176,6 +6998,163 @@ def __call__(
)
return resp
+ class _UpdateSchemaBundle(
+ _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle,
+ BigtableTableAdminRestStub,
+ ):
+ def __hash__(self):
+ return hash("BigtableTableAdminRestTransport.UpdateSchemaBundle")
+
+ @staticmethod
+ def _get_response(
+ host,
+ metadata,
+ query_params,
+ session,
+ timeout,
+ transcoded_request,
+ body=None,
+ ):
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(session, method)(
+ "{host}{uri}".format(host=host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+ return response
+
+ def __call__(
+ self,
+ request: bigtable_table_admin.UpdateSchemaBundleRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the update schema bundle method over HTTP.
+
+ Args:
+ request (~.bigtable_table_admin.UpdateSchemaBundleRequest):
+ The request object. The request for
+ [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options = (
+ _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_http_options()
+ )
+
+ request, metadata = self._interceptor.pre_update_schema_bundle(
+ request, metadata
+ )
+ transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_transcoded_request(
+ http_options, request
+ )
+
+ body = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_request_body_json(
+ transcoded_request
+ )
+
+ # Jsonify the query params
+ query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_query_params_json(
+ transcoded_request
+ )
+
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ request_url = "{host}{uri}".format(
+ host=self._host, uri=transcoded_request["uri"]
+ )
+ method = transcoded_request["method"]
+ try:
+ request_payload = json_format.MessageToJson(request)
+ except:
+ request_payload = None
+ http_request = {
+ "payload": request_payload,
+ "requestMethod": method,
+ "requestUrl": request_url,
+ "headers": dict(metadata),
+ }
+ _LOGGER.debug(
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateSchemaBundle",
+ extra={
+ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "rpcName": "UpdateSchemaBundle",
+ "httpRequest": http_request,
+ "metadata": http_request["headers"],
+ },
+ )
+
+ # Send the request
+ response = (
+ BigtableTableAdminRestTransport._UpdateSchemaBundle._get_response(
+ self._host,
+ metadata,
+ query_params,
+ self._session,
+ timeout,
+ transcoded_request,
+ body,
+ )
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+
+ resp = self._interceptor.post_update_schema_bundle(resp)
+ response_metadata = [(k, str(v)) for k, v in response.headers.items()]
+ resp, _ = self._interceptor.post_update_schema_bundle_with_metadata(
+ resp, response_metadata
+ )
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ try:
+ response_payload = json_format.MessageToJson(resp)
+ except:
+ response_payload = None
+ http_response = {
+ "payload": response_payload,
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_schema_bundle",
+ extra={
+ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "rpcName": "UpdateSchemaBundle",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
+ return resp
+
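_UpdateSchemaBundle parses the HTTP response into a raw operations_pb2.Operation, since UpdateSchemaBundle is a long-running operation; at the client layer this is normally wrapped in an operation future whose result is the updated SchemaBundle, with UpdateSchemaBundleMetadata as the operation metadata. A hedged sketch of that flow, with placeholder names and an assumed field-mask path:

# Hedged sketch: the update_schema_bundle client method, the SchemaBundle.name
# field, and the "proto_schema" mask path are assumptions; names are placeholders.
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
from google.cloud.bigtable_admin_v2 import types
from google.protobuf import field_mask_pb2

client = BigtableTableAdminClient(transport="rest")
request = types.UpdateSchemaBundleRequest(
    schema_bundle=types.SchemaBundle(
        name="projects/my-project/instances/my-instance/tables/my-table/schemaBundles/my-bundle",
    ),
    update_mask=field_mask_pb2.FieldMask(paths=["proto_schema"]),
    ignore_warnings=False,
)
operation = client.update_schema_bundle(request=request)
updated = operation.result()  # blocks until the long-running operation completes
print(updated.name)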
class _UpdateTable(
_BaseBigtableTableAdminRestTransport._BaseUpdateTable,
BigtableTableAdminRestStub,
@@ -6272,7 +7251,7 @@ def __call__(
"headers": dict(metadata),
}
_LOGGER.debug(
- f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateTable",
+ f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateTable",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "UpdateTable",
@@ -6319,7 +7298,7 @@ def __call__(
"status": response.status_code,
}
_LOGGER.debug(
- "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_table",
+ "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_table",
extra={
"serviceName": "google.bigtable.admin.v2.BigtableTableAdmin",
"rpcName": "UpdateTable",
@@ -6366,6 +7345,16 @@ def create_backup(
# In C++ this would require a dynamic_cast
return self._CreateBackup(self._session, self._host, self._interceptor) # type: ignore
+ @property
+ def create_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.CreateSchemaBundleRequest], operations_pb2.Operation
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._CreateSchemaBundle(self._session, self._host, self._interceptor) # type: ignore
+
@property
def create_table(
self,
@@ -6400,6 +7389,14 @@ def delete_backup(
# In C++ this would require a dynamic_cast
return self._DeleteBackup(self._session, self._host, self._interceptor) # type: ignore
+ @property
+ def delete_schema_bundle(
+ self,
+ ) -> Callable[[bigtable_table_admin.DeleteSchemaBundleRequest], empty_pb2.Empty]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._DeleteSchemaBundle(self._session, self._host, self._interceptor) # type: ignore
+
@property
def delete_snapshot(
self,
@@ -6461,6 +7458,14 @@ def get_iam_policy(
# In C++ this would require a dynamic_cast
return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore
+ @property
+ def get_schema_bundle(
+ self,
+ ) -> Callable[[bigtable_table_admin.GetSchemaBundleRequest], table.SchemaBundle]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._GetSchemaBundle(self._session, self._host, self._interceptor) # type: ignore
+
@property
def get_snapshot(
self,
@@ -6499,6 +7504,17 @@ def list_backups(
# In C++ this would require a dynamic_cast
return self._ListBackups(self._session, self._host, self._interceptor) # type: ignore
+ @property
+ def list_schema_bundles(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.ListSchemaBundlesRequest],
+ bigtable_table_admin.ListSchemaBundlesResponse,
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._ListSchemaBundles(self._session, self._host, self._interceptor) # type: ignore
+
@property
def list_snapshots(
self,
@@ -6594,6 +7610,16 @@ def update_backup(
# In C++ this would require a dynamic_cast
return self._UpdateBackup(self._session, self._host, self._interceptor) # type: ignore
+ @property
+ def update_schema_bundle(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.UpdateSchemaBundleRequest], operations_pb2.Operation
+ ]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._UpdateSchemaBundle(self._session, self._host, self._interceptor) # type: ignore
+
@property
def update_table(
self,
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py
index add95bcca..ef6c2374d 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py
@@ -327,6 +327,65 @@ def _get_query_params_json(transcoded_request):
query_params["$alt"] = "json;enum-encoding=int"
return query_params
+ class _BaseCreateSchemaBundle:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+ "schemaBundleId": "",
+ }
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles",
+ "body": "schema_bundle",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = bigtable_table_admin.CreateSchemaBundleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
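_BaseCreateSchemaBundle pins down the REST mapping for CreateSchemaBundle: a POST to the schemaBundles collection under the parent table, the schema_bundle message serialized as the JSON body, and schemaBundleId (a required field) injected into the query string with a default when it is missing from the transcoded request. A small illustration of the resulting request shape, using placeholder values rather than real library output:

# Illustration only: placeholder values showing the request shape implied by
# _BaseCreateSchemaBundle; this is not output captured from the library.
parent = "projects/my-project/instances/my-instance/tables/my-table"

http_request = {
    "method": "post",
    "uri": f"/v2/{parent}/schemaBundles",
    "body": "{...}",  # JSON-encoded schema_bundle message
    "query_params": {
        "schemaBundleId": "my-bundle",  # required; defaulted to "" when unset
        "$alt": "json;enum-encoding=int",
    },
}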
class _BaseCreateTable:
def __hash__(self): # pragma: NO COVER
return NotImplementedError("__hash__ must be implemented.")
@@ -535,6 +594,53 @@ def _get_query_params_json(transcoded_request):
query_params["$alt"] = "json;enum-encoding=int"
return query_params
+ class _BaseDeleteSchemaBundle:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "delete",
+ "uri": "/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = bigtable_table_admin.DeleteSchemaBundleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
class _BaseDeleteSnapshot:
def __hash__(self): # pragma: NO COVER
return NotImplementedError("__hash__ must be implemented.")
@@ -866,6 +972,16 @@ def _get_http_options():
"uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy",
"body": "*",
},
+ {
+ "method": "post",
+ "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:getIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:getIamPolicy",
+ "body": "*",
+ },
]
return http_options
@@ -901,6 +1017,53 @@ def _get_query_params_json(transcoded_request):
query_params["$alt"] = "json;enum-encoding=int"
return query_params
+ class _BaseGetSchemaBundle:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = bigtable_table_admin.GetSchemaBundleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
class _BaseGetSnapshot:
def __hash__(self): # pragma: NO COVER
return NotImplementedError("__hash__ must be implemented.")
@@ -1089,6 +1252,53 @@ def _get_query_params_json(transcoded_request):
query_params["$alt"] = "json;enum-encoding=int"
return query_params
+ class _BaseListSchemaBundles:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "get",
+ "uri": "/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = bigtable_table_admin.ListSchemaBundlesRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
class _BaseListSnapshots:
def __hash__(self): # pragma: NO COVER
return NotImplementedError("__hash__ must be implemented.")
@@ -1324,6 +1534,16 @@ def _get_http_options():
"uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy",
"body": "*",
},
+ {
+ "method": "post",
+ "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:setIamPolicy",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:setIamPolicy",
+ "body": "*",
+ },
]
return http_options
@@ -1443,6 +1663,16 @@ def _get_http_options():
"uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions",
"body": "*",
},
+ {
+ "method": "post",
+ "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:testIamPermissions",
+ "body": "*",
+ },
+ {
+ "method": "post",
+ "uri": "/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:testIamPermissions",
+ "body": "*",
+ },
]
return http_options
@@ -1651,6 +1881,63 @@ def _get_query_params_json(transcoded_request):
query_params["$alt"] = "json;enum-encoding=int"
return query_params
+ class _BaseUpdateSchemaBundle:
+ def __hash__(self): # pragma: NO COVER
+ return NotImplementedError("__hash__ must be implemented.")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ @staticmethod
+ def _get_http_options():
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "patch",
+ "uri": "/v2/{schema_bundle.name=projects/*/instances/*/tables/*/schemaBundles/*}",
+ "body": "schema_bundle",
+ },
+ ]
+ return http_options
+
+ @staticmethod
+ def _get_transcoded_request(http_options, request):
+ pb_request = bigtable_table_admin.UpdateSchemaBundleRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+ return transcoded_request
+
+ @staticmethod
+ def _get_request_body_json(transcoded_request):
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"], use_integers_for_enums=True
+ )
+ return body
+
+ @staticmethod
+ def _get_query_params_json(transcoded_request):
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(
+ _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_unset_required_fields(
+ query_params
+ )
+ )
+
+ query_params["$alt"] = "json;enum-encoding=int"
+ return query_params
+
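_BaseUpdateSchemaBundle gives UpdateSchemaBundle the matching PATCH mapping: the URI is keyed off schema_bundle.name, the schema_bundle message is again the JSON body, and update_mask (plus ignore_warnings, when set) rides in the query string. A comparable placeholder illustration; the exact query-parameter casing is an assumption:

# Illustration only: placeholder values for the PATCH mapping implied by
# _BaseUpdateSchemaBundle; parameter casing is an assumption, not library output.
name = "projects/my-project/instances/my-instance/tables/my-table/schemaBundles/my-bundle"

http_request = {
    "method": "patch",
    "uri": f"/v2/{name}",
    "body": "{...}",  # JSON-encoded schema_bundle message
    "query_params": {
        "updateMask": "proto_schema",  # assumed mask path, comma-separated when multiple
        "$alt": "json;enum-encoding=int",
    },
}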
class _BaseUpdateTable:
def __hash__(self): # pragma: NO COVER
return NotImplementedError("__hash__ must be implemented.")
diff --git a/google/cloud/bigtable_admin_v2/types/__init__.py b/google/cloud/bigtable_admin_v2/types/__init__.py
index 26821e2a4..d2036c7a3 100644
--- a/google/cloud/bigtable_admin_v2/types/__init__.py
+++ b/google/cloud/bigtable_admin_v2/types/__init__.py
@@ -66,12 +66,15 @@
CreateAuthorizedViewRequest,
CreateBackupMetadata,
CreateBackupRequest,
+ CreateSchemaBundleMetadata,
+ CreateSchemaBundleRequest,
CreateTableFromSnapshotMetadata,
CreateTableFromSnapshotRequest,
CreateTableRequest,
DataBoostReadLocalWrites,
DeleteAuthorizedViewRequest,
DeleteBackupRequest,
+ DeleteSchemaBundleRequest,
DeleteSnapshotRequest,
DeleteTableRequest,
DropRowRangeRequest,
@@ -79,12 +82,15 @@
GenerateConsistencyTokenResponse,
GetAuthorizedViewRequest,
GetBackupRequest,
+ GetSchemaBundleRequest,
GetSnapshotRequest,
GetTableRequest,
ListAuthorizedViewsRequest,
ListAuthorizedViewsResponse,
ListBackupsRequest,
ListBackupsResponse,
+ ListSchemaBundlesRequest,
+ ListSchemaBundlesResponse,
ListSnapshotsRequest,
ListSnapshotsResponse,
ListTablesRequest,
@@ -101,6 +107,8 @@
UpdateAuthorizedViewMetadata,
UpdateAuthorizedViewRequest,
UpdateBackupRequest,
+ UpdateSchemaBundleMetadata,
+ UpdateSchemaBundleRequest,
UpdateTableMetadata,
UpdateTableRequest,
)
@@ -126,9 +134,13 @@
ColumnFamily,
EncryptionInfo,
GcRule,
+ ProtoSchema,
RestoreInfo,
+ SchemaBundle,
Snapshot,
Table,
+ TieredStorageConfig,
+ TieredStorageRule,
RestoreSourceType,
)
from .types import (
@@ -186,12 +198,15 @@
"CreateAuthorizedViewRequest",
"CreateBackupMetadata",
"CreateBackupRequest",
+ "CreateSchemaBundleMetadata",
+ "CreateSchemaBundleRequest",
"CreateTableFromSnapshotMetadata",
"CreateTableFromSnapshotRequest",
"CreateTableRequest",
"DataBoostReadLocalWrites",
"DeleteAuthorizedViewRequest",
"DeleteBackupRequest",
+ "DeleteSchemaBundleRequest",
"DeleteSnapshotRequest",
"DeleteTableRequest",
"DropRowRangeRequest",
@@ -199,12 +214,15 @@
"GenerateConsistencyTokenResponse",
"GetAuthorizedViewRequest",
"GetBackupRequest",
+ "GetSchemaBundleRequest",
"GetSnapshotRequest",
"GetTableRequest",
"ListAuthorizedViewsRequest",
"ListAuthorizedViewsResponse",
"ListBackupsRequest",
"ListBackupsResponse",
+ "ListSchemaBundlesRequest",
+ "ListSchemaBundlesResponse",
"ListSnapshotsRequest",
"ListSnapshotsResponse",
"ListTablesRequest",
@@ -221,6 +239,8 @@
"UpdateAuthorizedViewMetadata",
"UpdateAuthorizedViewRequest",
"UpdateBackupRequest",
+ "UpdateSchemaBundleMetadata",
+ "UpdateSchemaBundleRequest",
"UpdateTableMetadata",
"UpdateTableRequest",
"OperationProgress",
@@ -240,9 +260,13 @@
"ColumnFamily",
"EncryptionInfo",
"GcRule",
+ "ProtoSchema",
"RestoreInfo",
+ "SchemaBundle",
"Snapshot",
"Table",
+ "TieredStorageConfig",
+ "TieredStorageRule",
"RestoreSourceType",
"Type",
)
diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
index 4cadfb1bf..69de07a2a 100644
--- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
+++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
@@ -74,6 +74,14 @@
"UpdateAuthorizedViewRequest",
"UpdateAuthorizedViewMetadata",
"DeleteAuthorizedViewRequest",
+ "CreateSchemaBundleRequest",
+ "CreateSchemaBundleMetadata",
+ "UpdateSchemaBundleRequest",
+ "UpdateSchemaBundleMetadata",
+ "GetSchemaBundleRequest",
+ "ListSchemaBundlesRequest",
+ "ListSchemaBundlesResponse",
+ "DeleteSchemaBundleRequest",
},
)
@@ -227,20 +235,20 @@ class CreateTableRequest(proto.Message):
Example:
- - Row keys :=
- ``["a", "apple", "custom", "customer_1", "customer_2",``
- ``"other", "zz"]``
- - initial_split_keys :=
- ``["apple", "customer_1", "customer_2", "other"]``
- - Key assignment:
-
- - Tablet 1 ``[, apple) => {"a"}.``
- - Tablet 2
- ``[apple, customer_1) => {"apple", "custom"}.``
- - Tablet 3
- ``[customer_1, customer_2) => {"customer_1"}.``
- - Tablet 4 ``[customer_2, other) => {"customer_2"}.``
- - Tablet 5 ``[other, ) => {"other", "zz"}.``
+ - Row keys :=
+ ``["a", "apple", "custom", "customer_1", "customer_2",``
+ ``"other", "zz"]``
+ - initial_split_keys :=
+ ``["apple", "customer_1", "customer_2", "other"]``
+ - Key assignment:
+
+ - Tablet 1 ``[, apple) => {"a"}.``
+ - Tablet 2
+ ``[apple, customer_1) => {"apple", "custom"}.``
+ - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.``
+ - Tablet 4 ``[customer_2, other) => {"customer_2"}.``
+ - Tablet 5
+ ``[other, ) => {"other", "zz"}.``
"""
class Split(proto.Message):
@@ -474,13 +482,13 @@ class UpdateTableRequest(proto.Message):
which fields (e.g. ``change_stream_config``) in the
``table`` field should be updated. This mask is relative to
the ``table`` field, not to the request message. The
- wildcard (*) path is currently not supported. Currently
+ wildcard (\*) path is currently not supported. Currently
UpdateTable is only supported for the following fields:
- - ``change_stream_config``
- - ``change_stream_config.retention_period``
- - ``deletion_protection``
- - ``row_key_schema``
+ - ``change_stream_config``
+ - ``change_stream_config.retention_period``
+ - ``deletion_protection``
+ - ``row_key_schema``
If ``column_families`` is set in ``update_mask``, it will
return an UNIMPLEMENTED error.
@@ -1091,7 +1099,7 @@ class CreateBackupRequest(proto.Message):
name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in length
- and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
backup (google.cloud.bigtable_admin_v2.types.Backup):
Required. The backup to create.
"""
@@ -1159,7 +1167,7 @@ class UpdateBackupRequest(proto.Message):
required. Other fields are ignored. Update is only supported
for the following fields:
- - ``backup.expire_time``.
+ - ``backup.expire_time``.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. A mask specifying which fields (e.g.
``expire_time``) in the Backup resource should be updated.
@@ -1238,16 +1246,16 @@ class ListBackupsRequest(proto.Message):
The fields eligible for filtering are:
- - ``name``
- - ``source_table``
- - ``state``
- - ``start_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``end_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``expire_time`` (and values are of the format
- YYYY-MM-DDTHH:MM:SSZ)
- - ``size_bytes``
+ - ``name``
+ - ``source_table``
+ - ``state``
+ - ``start_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``end_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``expire_time`` (and values are of the format
+ YYYY-MM-DDTHH:MM:SSZ)
+ - ``size_bytes``
To filter on multiple expressions, provide each separate
expression within parentheses. By default, each expression
@@ -1256,20 +1264,20 @@ class ListBackupsRequest(proto.Message):
Some examples of using filters are:
- - ``name:"exact"`` --> The backup's name is the string
- "exact".
- - ``name:howl`` --> The backup's name contains the string
- "howl".
- - ``source_table:prod`` --> The source_table's name
- contains the string "prod".
- - ``state:CREATING`` --> The backup is pending creation.
- - ``state:READY`` --> The backup is fully created and ready
- for use.
- - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")``
- --> The backup name contains the string "howl" and
- start_time of the backup is before 2018-03-28T14:50:00Z.
- - ``size_bytes > 10000000000`` --> The backup's size is
- greater than 10GB
+ - ``name:"exact"`` --> The backup's name is the string
+ "exact".
+ - ``name:howl`` --> The backup's name contains the string
+ "howl".
+ - ``source_table:prod`` --> The source_table's name contains
+ the string "prod".
+ - ``state:CREATING`` --> The backup is pending creation.
+ - ``state:READY`` --> The backup is fully created and ready
+ for use.
+ - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")``
+ --> The backup name contains the string "howl" and
+ start_time of the backup is before 2018-03-28T14:50:00Z.
+ - ``size_bytes > 10000000000`` --> The backup's size is
+ greater than 10GB
order_by (str):
An expression for specifying the sort order of the results
of the request. The string value should specify one or more
@@ -1278,13 +1286,13 @@ class ListBackupsRequest(proto.Message):
Fields supported are:
- - name
- - source_table
- - expire_time
- - start_time
- - end_time
- - size_bytes
- - state
+ - name
+ - source_table
+ - expire_time
+ - start_time
+ - end_time
+ - size_bytes
+ - state
For example, "start_time". The default sorting order is
ascending. To specify descending order for the field, a
@@ -1373,7 +1381,7 @@ class CopyBackupRequest(proto.Message):
to create the full backup name, of the form:
``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
This string must be between 1 and 50 characters in length
- and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*.
+ and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*.
source_backup (str):
Required. The source backup to be copied from. The source
backup needs to be in READY state for it to be copied.
@@ -1484,7 +1492,7 @@ class CreateAuthorizedViewMetadata(proto.Message):
Attributes:
original_request (google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest):
The request that prompted the initiation of
- this CreateInstance operation.
+ this CreateAuthorizedView operation.
request_time (google.protobuf.timestamp_pb2.Timestamp):
The time at which the original request was
received.
@@ -1536,7 +1544,7 @@ class ListAuthorizedViewsRequest(proto.Message):
previous call.
view (google.cloud.bigtable_admin_v2.types.AuthorizedView.ResponseView):
Optional. The resource_view to be applied to the returned
- views' fields. Default to NAME_ONLY.
+            AuthorizedViews' fields. Defaults to NAME_ONLY.
"""
parent: str = proto.Field(
@@ -1620,8 +1628,8 @@ class UpdateAuthorizedViewRequest(proto.Message):
authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView):
Required. The AuthorizedView to update. The ``name`` in
``authorized_view`` is used to identify the AuthorizedView.
- AuthorizedView name must in this format
- projects//instances//tables//authorizedViews/
+            AuthorizedView name must be in this format:
+ ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. The list of fields to update. A mask specifying
which fields in the AuthorizedView resource should be
@@ -1712,4 +1720,247 @@ class DeleteAuthorizedViewRequest(proto.Message):
)
+class CreateSchemaBundleRequest(proto.Message):
+ r"""The request for
+ [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle].
+
+ Attributes:
+ parent (str):
+ Required. The parent resource where this schema bundle will
+ be created. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+ schema_bundle_id (str):
+ Required. The unique ID to use for the schema
+ bundle, which will become the final component of
+ the schema bundle's resource name.
+ schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle):
+ Required. The schema bundle to create.
+ """
+
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ schema_bundle_id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ schema_bundle: gba_table.SchemaBundle = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=gba_table.SchemaBundle,
+ )
+
+
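CreateSchemaBundleRequest follows the standard create pattern: a parent table, a caller-chosen schema_bundle_id that becomes the final path component, and the SchemaBundle payload itself. A hedged construction sketch; the payload is left empty here because the SchemaBundle message fields live in types/table.py, outside this excerpt:

# Hedged sketch: placeholder names; schema_bundle is left empty because its fields
# (the actual schema definition) are defined in types/table.py, not shown in this diff.
from google.cloud.bigtable_admin_v2 import types

request = types.CreateSchemaBundleRequest(
    parent="projects/my-project/instances/my-instance/tables/my-table",
    schema_bundle_id="my-bundle",
    schema_bundle=types.SchemaBundle(),
)
# A client.create_schema_bundle(request=request) call would return a long-running
# operation whose metadata type is the CreateSchemaBundleMetadata message defined below.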
+class CreateSchemaBundleMetadata(proto.Message):
+ r"""The metadata for the Operation returned by
+ [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle].
+
+ Attributes:
+ name (str):
+ The unique name identifying this schema bundle. Values are
+ of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ The time at which this operation started.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ If set, the time at which this operation
+ finished or was canceled.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ start_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=timestamp_pb2.Timestamp,
+ )
+ end_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+class UpdateSchemaBundleRequest(proto.Message):
+ r"""The request for
+ [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle].
+
+ Attributes:
+ schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle):
+ Required. The schema bundle to update.
+
+ The schema bundle's ``name`` field is used to identify the
+ schema bundle to update. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Optional. The list of fields to update.
+ ignore_warnings (bool):
+ Optional. If set, ignore the safety checks
+ when updating the Schema Bundle. The safety
+ checks are:
+
+ - The new Schema Bundle is backwards compatible
+ with the existing Schema Bundle.
+ """
+
+ schema_bundle: gba_table.SchemaBundle = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=gba_table.SchemaBundle,
+ )
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=field_mask_pb2.FieldMask,
+ )
+ ignore_warnings: bool = proto.Field(
+ proto.BOOL,
+ number=3,
+ )
+
+
+class UpdateSchemaBundleMetadata(proto.Message):
+ r"""The metadata for the Operation returned by
+ [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle].
+
+ Attributes:
+ name (str):
+ The unique name identifying this schema bundle. Values are
+ of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ The time at which this operation started.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ If set, the time at which this operation
+ finished or was canceled.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ start_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=timestamp_pb2.Timestamp,
+ )
+ end_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+class GetSchemaBundleRequest(proto.Message):
+ r"""The request for
+ [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle].
+
+ Attributes:
+ name (str):
+ Required. The unique name of the schema bundle to retrieve.
+ Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class ListSchemaBundlesRequest(proto.Message):
+ r"""The request for
+ [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles].
+
+ Attributes:
+ parent (str):
+ Required. The parent, which owns this collection of schema
+ bundles. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+ page_size (int):
+ The maximum number of schema bundles to
+ return. If the value is positive, the server may
+ return at most this value. If unspecified, the
+ server will return the maximum allowed page
+ size.
+ page_token (str):
+ A page token, received from a previous ``ListSchemaBundles``
+ call. Provide this to retrieve the subsequent page.
+
+ When paginating, all other parameters provided to
+ ``ListSchemaBundles`` must match the call that provided the
+ page token.
+ """
+
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ page_size: int = proto.Field(
+ proto.INT32,
+ number=2,
+ )
+ page_token: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+
+
+class ListSchemaBundlesResponse(proto.Message):
+ r"""The response for
+ [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles].
+
+ Attributes:
+ schema_bundles (MutableSequence[google.cloud.bigtable_admin_v2.types.SchemaBundle]):
+ The schema bundles from the specified table.
+ next_page_token (str):
+ A token, which can be sent as ``page_token`` to retrieve the
+ next page. If this field is omitted, there are no subsequent
+ pages.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ schema_bundles: MutableSequence[gba_table.SchemaBundle] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message=gba_table.SchemaBundle,
+ )
+ next_page_token: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+
+class DeleteSchemaBundleRequest(proto.Message):
+ r"""The request for
+ [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle].
+
+ Attributes:
+ name (str):
+ Required. The unique name of the schema bundle to delete.
+ Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+ etag (str):
+ Optional. The etag of the schema bundle.
+ If this is provided, it must match the server's
+ etag. The server returns an ABORTED error on a
+ mismatched etag.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ etag: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+
__all__ = tuple(sorted(__protobuf__.manifest))
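The new SchemaBundle request/response messages above follow the usual create/get/list/update/delete pattern. A minimal sketch of how they might be used, assuming the generated BigtableTableAdminClient exposes matching create_schema_bundle and list_schema_bundles methods (those client methods are not part of this hunk, and the resource names below are hypothetical)::

    from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient, types

    client = BigtableTableAdminClient()
    parent = "projects/my-project/instances/my-instance/tables/my-table"

    # CreateSchemaBundle is a long-running operation; result() yields the SchemaBundle.
    operation = client.create_schema_bundle(
        request=types.CreateSchemaBundleRequest(
            parent=parent,
            schema_bundle_id="my-bundle",
            schema_bundle=types.SchemaBundle(
                proto_schema=types.ProtoSchema(
                    proto_descriptors=open("descriptors.pb", "rb").read()
                )
            ),
        )
    )
    bundle = operation.result()

    # ListSchemaBundles returns a pager that transparently follows next_page_token.
    for item in client.list_schema_bundles(parent=parent):
        print(item.name, item.etag)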
diff --git a/google/cloud/bigtable_admin_v2/types/instance.py b/google/cloud/bigtable_admin_v2/types/instance.py
index 2623b770e..f07414d56 100644
--- a/google/cloud/bigtable_admin_v2/types/instance.py
+++ b/google/cloud/bigtable_admin_v2/types/instance.py
@@ -67,15 +67,15 @@ class Instance(proto.Message):
customer's organizational needs and deployment strategies.
They can be used to filter resources and aggregate metrics.
- - Label keys must be between 1 and 63 characters long and
- must conform to the regular expression:
- ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``.
- - Label values must be between 0 and 63 characters long and
- must conform to the regular expression:
- ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``.
- - No more than 64 labels can be associated with a given
- resource.
- - Keys and values must both be under 128 bytes.
+ - Label keys must be between 1 and 63 characters long and
+ must conform to the regular expression:
+ ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``.
+ - Label values must be between 0 and 63 characters long and
+ must conform to the regular expression:
+ ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``.
+ - No more than 64 labels can be associated with a given
+ resource.
+ - Keys and values must both be under 128 bytes.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. A commit timestamp representing when this
Instance was created. For instances created before this
@@ -89,6 +89,20 @@ class Instance(proto.Message):
Output only. Reserved for future use.
This field is a member of `oneof`_ ``_satisfies_pzi``.
+ tags (MutableMapping[str, str]):
+ Optional. Input only. Immutable. Tag
+ keys/values directly bound to this resource. For
+ example:
+
+ - "123/environment": "production",
+ - "123/costCenter": "marketing"
+
+ Tags and Labels (above) are both used to bind
+ metadata to resources, with different use-cases.
+ See
+ https://cloud.google.com/resource-manager/docs/tags/tags-overview
+ for an in-depth overview on the difference
+ between tags and labels.
"""
class State(proto.Enum):
@@ -169,6 +183,11 @@ class Type(proto.Enum):
number=11,
optional=True,
)
+ tags: MutableMapping[str, str] = proto.MapField(
+ proto.STRING,
+ proto.STRING,
+ number=12,
+ )
class AutoscalingTargets(proto.Message):
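The new ``tags`` map sits alongside ``labels`` on the Instance message; a short sketch of the distinction (the display name and tag values here are illustrative only)::

    from google.cloud.bigtable_admin_v2 import types

    instance = types.Instance(
        display_name="prod-instance",
        labels={"env": "prod"},                  # mutable, used for filtering/aggregation
        tags={"123/environment": "production"},  # input only, immutable, bound at creation
    )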
diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py
index 730b54ce3..c4f23d5fa 100644
--- a/google/cloud/bigtable_admin_v2/types/table.py
+++ b/google/cloud/bigtable_admin_v2/types/table.py
@@ -20,6 +20,7 @@
import proto # type: ignore
from google.cloud.bigtable_admin_v2.types import types
+from google.cloud.bigtable_admin_v2.utils import oneof_message
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
@@ -39,6 +40,10 @@
"Snapshot",
"Backup",
"BackupInfo",
+ "TieredStorageConfig",
+ "TieredStorageRule",
+ "ProtoSchema",
+ "SchemaBundle",
},
)
@@ -151,9 +156,9 @@ class Table(proto.Message):
i.e. deleting the following resources through Admin APIs are
prohibited:
- - The table.
- - The column families in the table.
- - The instance containing the table.
+ - The table.
+ - The column families in the table.
+ - The instance containing the table.
Note one can still delete the data stored in the table
through Data APIs.
@@ -163,6 +168,17 @@ class Table(proto.Message):
disabled.
This field is a member of `oneof`_ ``automated_backup_config``.
+ tiered_storage_config (google.cloud.bigtable_admin_v2.types.TieredStorageConfig):
+ Rules to specify what data is stored in each
+ storage tier. Different tiers store data
+ differently, providing different trade-offs
+ between cost and performance. Different parts of
+ a table can be stored separately on different
+ tiers.
+ If a config is specified, tiered storage is
+ enabled for this table. Otherwise, tiered
+ storage is disabled.
+ Only SSD instances can configure tiered storage.
row_key_schema (google.cloud.bigtable_admin_v2.types.Type.Struct):
The row key schema for this table. The schema is used to
decode the raw row key bytes into a structured format. The
@@ -178,7 +194,7 @@ class Table(proto.Message):
they encounter an invalid row key.
For example, if \_key =
- "some_id#2024-04-30#\x00\x13\x00\xf3" with the following
+ "some_id#2024-04-30#\\x00\\x13\\x00\\xf3" with the following
schema: { fields { field_name: "id" type { string {
encoding: utf8_bytes {} } } } fields { field_name: "date"
type { string { encoding: utf8_bytes {} } } } fields {
@@ -191,8 +207,8 @@ class Table(proto.Message):
\_key, product_code FROM table" will return two columns:
/------------------------------------------------------
| \| \_key \| product_code \| \|
- --------------------------------------|--------------\| \|
- "some_id#2024-04-30#\x00\x13\x00\xf3" \| 1245427 \|
+ --------------------------------------\|--------------\|
+ \| "some_id#2024-04-30#\\x00\\x13\\x00\\xf3" \| 1245427 \|
------------------------------------------------------/
The schema has the following invariants: (1) The decoded
@@ -203,19 +219,19 @@ class Table(proto.Message):
type is limited to scalar types only: Array, Map, Aggregate,
and Struct are not allowed. (4) The field names must not
collide with existing column family names and reserved
- keywords "_key" and "_timestamp".
+ keywords "\_key" and "\_timestamp".
The following update operations are allowed for
row_key_schema:
- - Update from an empty schema to a new schema.
- - Remove the existing schema. This operation requires
- setting the ``ignore_warnings`` flag to ``true``, since
- it might be a backward incompatible change. Without the
- flag, the update request will fail with an
- INVALID_ARGUMENT error. Any other row key schema update
- operation (e.g. update existing schema columns names or
- types) is currently unsupported.
+ - Update from an empty schema to a new schema.
+ - Remove the existing schema. This operation requires
+ setting the ``ignore_warnings`` flag to ``true``, since it
+ might be a backward incompatible change. Without the flag,
+ the update request will fail with an INVALID_ARGUMENT
+ error. Any other row key schema update operation (e.g.
+ update existing schema columns names or types) is
+ currently unsupported.
"""
class TimestampGranularity(proto.Enum):
@@ -396,6 +412,11 @@ class AutomatedBackupPolicy(proto.Message):
oneof="automated_backup_config",
message=AutomatedBackupPolicy,
)
+ tiered_storage_config: "TieredStorageConfig" = proto.Field(
+ proto.MESSAGE,
+ number=14,
+ message="TieredStorageConfig",
+ )
row_key_schema: types.Type.Struct = proto.Field(
proto.MESSAGE,
number=15,
@@ -554,7 +575,7 @@ class ColumnFamily(proto.Message):
If ``value_type`` is ``Aggregate``, written data must be
compatible with:
- - ``value_type.input_type`` for ``AddInput`` mutations
+ - ``value_type.input_type`` for ``AddInput`` mutations
"""
gc_rule: "GcRule" = proto.Field(
@@ -569,7 +590,7 @@ class ColumnFamily(proto.Message):
)
-class GcRule(proto.Message):
+class GcRule(oneof_message.OneofMessage):
r"""Rule for determining which cells to delete during garbage
collection.
@@ -846,8 +867,8 @@ class Backup(proto.Message):
backup or updating its ``expire_time``, the value must be
greater than the backup creation time by:
- - At least 6 hours
- - At most 90 days
+ - At least 6 hours
+ - At most 90 days
Once the ``expire_time`` has passed, Cloud Bigtable will
delete the backup.
@@ -877,7 +898,7 @@ class Backup(proto.Message):
standard backup. This value must be greater than the backup
creation time by:
- - At least 24 hours
+ - At least 24 hours
This field only applies for hot backups. When creating or
updating a standard backup, attempting to set this field
@@ -1025,4 +1046,116 @@ class BackupInfo(proto.Message):
)
+class TieredStorageConfig(proto.Message):
+ r"""Config for tiered storage.
+ A valid config must have a valid TieredStorageRule. Otherwise
+ the whole TieredStorageConfig must be unset.
+ By default all data is stored in the SSD tier (only SSD
+ instances can configure tiered storage).
+
+ Attributes:
+ infrequent_access (google.cloud.bigtable_admin_v2.types.TieredStorageRule):
+ Rule to specify what data is stored in the
+ infrequent access (IA) tier. The IA tier allows
+ storing more data per node with reduced
+ performance.
+ """
+
+ infrequent_access: "TieredStorageRule" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="TieredStorageRule",
+ )
+
+
+class TieredStorageRule(proto.Message):
+ r"""Rule to specify what data is stored in a storage tier.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ include_if_older_than (google.protobuf.duration_pb2.Duration):
+ Include cells older than the given age.
+ For the infrequent access tier, this value must
+ be at least 30 days.
+
+ This field is a member of `oneof`_ ``rule``.
+ """
+
+ include_if_older_than: duration_pb2.Duration = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof="rule",
+ message=duration_pb2.Duration,
+ )
+
+
+class ProtoSchema(proto.Message):
+ r"""Represents a protobuf schema.
+
+ Attributes:
+ proto_descriptors (bytes):
+ Required. Contains a protobuf-serialized
+ `google.protobuf.FileDescriptorSet `__,
+ which could include multiple proto files. To generate it,
+ `install `__ and
+ run ``protoc`` with ``--include_imports`` and
+ ``--descriptor_set_out``. For example, to generate for
+ moon/shot/app.proto, run
+
+ ::
+
+ $protoc --proto_path=/app_path --proto_path=/lib_path \
+ --include_imports \
+ --descriptor_set_out=descriptors.pb \
+ moon/shot/app.proto
+
+ For more details, see protobuffer `self
+ description `__.
+ """
+
+ proto_descriptors: bytes = proto.Field(
+ proto.BYTES,
+ number=2,
+ )
+
+
+class SchemaBundle(proto.Message):
+ r"""A named collection of related schemas.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ name (str):
+ Identifier. The unique name identifying this schema bundle.
+ Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}``
+ proto_schema (google.cloud.bigtable_admin_v2.types.ProtoSchema):
+ Schema for Protobufs.
+
+ This field is a member of `oneof`_ ``type``.
+ etag (str):
+ Optional. The etag for this schema bundle.
+ This may be sent on update and delete requests
+ to ensure the client has an up-to-date value
+ before proceeding. The server returns an ABORTED
+ error on a mismatched etag.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ proto_schema: "ProtoSchema" = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="type",
+ message="ProtoSchema",
+ )
+ etag: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+
+
__all__ = tuple(sorted(__protobuf__.manifest))
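A minimal sketch of the new tiered storage messages, assuming the usual proto-plus construction (the 90-day threshold is an arbitrary example; the documented minimum for the infrequent access tier is 30 days)::

    from google.protobuf import duration_pb2
    from google.cloud.bigtable_admin_v2 import types

    table = types.Table(
        tiered_storage_config=types.TieredStorageConfig(
            infrequent_access=types.TieredStorageRule(
                include_if_older_than=duration_pb2.Duration(seconds=90 * 24 * 3600)
            )
        )
    )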
diff --git a/google/cloud/bigtable_admin_v2/types/types.py b/google/cloud/bigtable_admin_v2/types/types.py
index 42935df3c..4f56429da 100644
--- a/google/cloud/bigtable_admin_v2/types/types.py
+++ b/google/cloud/bigtable_admin_v2/types/types.py
@@ -40,15 +40,15 @@ class Type(proto.Message):
Each encoding can operate in one of two modes:
- - Sorted: In this mode, Bigtable guarantees that
- ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is
- useful anywhere sort order is important, for example when
- encoding keys.
- - Distinct: In this mode, Bigtable guarantees that if ``X != Y``
- then ``Encode(X) != Encode(Y)``. However, the converse is not
- guaranteed. For example, both "{'foo': '1', 'bar': '2'}" and
- "{'bar': '2', 'foo': '1'}" are valid encodings of the same JSON
- value.
+ - Sorted: In this mode, Bigtable guarantees that
+ ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is
+ useful anywhere sort order is important, for example when encoding
+ keys.
+ - Distinct: In this mode, Bigtable guarantees that if ``X != Y``
+ then ``Encode(X) != Encode(Y)``. However, the converse is not
+ guaranteed. For example, both "{'foo': '1', 'bar': '2'}" and
+ "{'bar': '2', 'foo': '1'}" are valid encodings of the same JSON
+ value.
The API clearly documents which mode is used wherever an encoding
can be configured. Each encoding also documents which values are
@@ -112,6 +112,14 @@ class Type(proto.Message):
map_type (google.cloud.bigtable_admin_v2.types.Type.Map):
Map
+ This field is a member of `oneof`_ ``kind``.
+ proto_type (google.cloud.bigtable_admin_v2.types.Type.Proto):
+ Proto
+
+ This field is a member of `oneof`_ ``kind``.
+ enum_type (google.cloud.bigtable_admin_v2.types.Type.Enum):
+ Enum
+
This field is a member of `oneof`_ ``kind``.
"""
@@ -197,16 +205,16 @@ class Utf8Bytes(proto.Message):
Sorted mode:
- - All values are supported.
- - Code point order is preserved.
+ - All values are supported.
+ - Code point order is preserved.
Distinct mode: all values are supported.
Compatible with:
- - BigQuery ``TEXT`` encoding
- - HBase ``Bytes.toBytes``
- - Java ``String#getBytes(StandardCharsets.UTF_8)``
+ - BigQuery ``TEXT`` encoding
+ - HBase ``Bytes.toBytes``
+ - Java ``String#getBytes(StandardCharsets.UTF_8)``
"""
@@ -268,9 +276,9 @@ class BigEndianBytes(proto.Message):
Compatible with:
- - BigQuery ``BINARY`` encoding
- - HBase ``Bytes.toBytes``
- - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN``
+ - BigQuery ``BINARY`` encoding
+ - HBase ``Bytes.toBytes``
+ - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN``
Attributes:
bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes):
@@ -350,7 +358,7 @@ class Encoding(proto.Message):
Compatible with:
- - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS``
+ - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS``
This field is a member of `oneof`_ ``encoding``.
"""
@@ -447,17 +455,17 @@ class DelimitedBytes(proto.Message):
Sorted mode:
- - Fields are encoded in sorted mode.
- - Encoded field values must not contain any bytes <=
- ``delimiter[0]``
- - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
- if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
- first.
+ - Fields are encoded in sorted mode.
+ - Encoded field values must not contain any bytes <=
+ ``delimiter[0]``
+ - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
+ if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
+ first.
Distinct mode:
- - Fields are encoded in distinct mode.
- - Encoded field values must not contain ``delimiter[0]``.
+ - Fields are encoded in distinct mode.
+ - Encoded field values must not contain ``delimiter[0]``.
Attributes:
delimiter (bytes):
@@ -480,24 +488,23 @@ class OrderedCodeBytes(proto.Message):
Fields that encode to the empty string "" have special handling:
- - If *every* field encodes to "", or if the STRUCT has no fields
- defined, then the STRUCT is encoded as the fixed byte pair {0x00,
- 0x00}.
- - Otherwise, the STRUCT only encodes until the last non-empty
- field, omitting any trailing empty fields. Any empty fields that
- aren't omitted are replaced with the fixed byte pair {0x00,
- 0x00}.
+ - If *every* field encodes to "", or if the STRUCT has no fields
+ defined, then the STRUCT is encoded as the fixed byte pair {0x00,
+ 0x00}.
+ - Otherwise, the STRUCT only encodes until the last non-empty field,
+ omitting any trailing empty fields. Any empty fields that aren't
+ omitted are replaced with the fixed byte pair {0x00, 0x00}.
Examples:
- - STRUCT() -> "\00\00"
- - STRUCT("") -> "\00\00"
- - STRUCT("", "") -> "\00\00"
- - STRUCT("", "B") -> "\00\00" + "\00\01" + "B"
- - STRUCT("A", "") -> "A"
- - STRUCT("", "B", "") -> "\00\00" + "\00\01" + "B"
- - STRUCT("A", "", "C") -> "A" + "\00\01" + "\00\00" + "\00\01" +
- "C"
+ - STRUCT() -> "\\00\\00"
+ - STRUCT("") -> "\\00\\00"
+ - STRUCT("", "") -> "\\00\\00"
+ - STRUCT("", "B") -> "\\00\\00" + "\\00\\01" + "B"
+ - STRUCT("A", "") -> "A"
+ - STRUCT("", "B", "") -> "\\00\\00" + "\\00\\01" + "B"
+ - STRUCT("A", "", "C") -> "A" + "\\00\\01" + "\\00\\00" + "\\00\\01"
+ + "C"
Since null bytes are always escaped, this encoding can cause size
blowup for encodings like ``Int64.BigEndianBytes`` that are likely
@@ -505,16 +512,16 @@ class OrderedCodeBytes(proto.Message):
Sorted mode:
- - Fields are encoded in sorted mode.
- - All values supported by the field encodings are allowed
- - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
- if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
- first.
+ - Fields are encoded in sorted mode.
+ - All values supported by the field encodings are allowed
+ - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
+ if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
+ first.
Distinct mode:
- - Fields are encoded in distinct mode.
- - All values supported by the field encodings are allowed.
+ - Fields are encoded in distinct mode.
+ - All values supported by the field encodings are allowed.
"""
@@ -548,6 +555,52 @@ class OrderedCodeBytes(proto.Message):
message="Type.Struct.Encoding",
)
+ class Proto(proto.Message):
+ r"""A protobuf message type. Values of type ``Proto`` are stored in
+ ``Value.bytes_value``.
+
+ Attributes:
+ schema_bundle_id (str):
+ The ID of the schema bundle that this proto
+ is defined in.
+ message_name (str):
+ The fully qualified name of the protobuf
+ message, including package. In the format of
+ "foo.bar.Message".
+ """
+
+ schema_bundle_id: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ message_name: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+ class Enum(proto.Message):
+ r"""A protobuf enum type. Values of type ``Enum`` are stored in
+ ``Value.int_value``.
+
+ Attributes:
+ schema_bundle_id (str):
+ The ID of the schema bundle that this enum is
+ defined in.
+ enum_name (str):
+ The fully qualified name of the protobuf enum
+ message, including package. In the format of
+ "foo.bar.EnumMessage".
+ """
+
+ schema_bundle_id: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ enum_name: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
class Array(proto.Message):
r"""An ordered list of elements of a given type. Values of type
``Array`` are stored in ``Value.array_value``.
@@ -771,6 +824,18 @@ class HyperLogLogPlusPlusUniqueCount(proto.Message):
oneof="kind",
message=Map,
)
+ proto_type: Proto = proto.Field(
+ proto.MESSAGE,
+ number=13,
+ oneof="kind",
+ message=Proto,
+ )
+ enum_type: Enum = proto.Field(
+ proto.MESSAGE,
+ number=14,
+ oneof="kind",
+ message=Enum,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
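The new ``proto_type`` and ``enum_type`` kinds reference a message or enum by the schema bundle it is defined in. A small sketch of constructing such types (the bundle ID and type names are placeholders)::

    from google.cloud.bigtable_admin_v2 import types

    proto_value_type = types.Type(
        proto_type=types.Type.Proto(
            schema_bundle_id="my-bundle",
            message_name="foo.bar.Message",      # values stored in Value.bytes_value
        )
    )

    enum_value_type = types.Type(
        enum_type=types.Type.Enum(
            schema_bundle_id="my-bundle",
            enum_name="foo.bar.EnumMessage",     # values stored in Value.int_value
        )
    )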
diff --git a/.github/.OwlBot.yaml b/google/cloud/bigtable_admin_v2/utils/__init__.py
similarity index 56%
rename from .github/.OwlBot.yaml
rename to google/cloud/bigtable_admin_v2/utils/__init__.py
index fe2f7841a..93d766056 100644
--- a/.github/.OwlBot.yaml
+++ b/google/cloud/bigtable_admin_v2/utils/__init__.py
@@ -1,4 +1,5 @@
-# Copyright 2021 Google LLC
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,18 +12,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-docker:
- image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
-
-deep-remove-regex:
- - /owl-bot-staging
-
-deep-copy-regex:
- - source: /google/bigtable/admin/(v.*)/.*-py/(.*)
- dest: /owl-bot-staging/bigtable_admin/$1/$2
- - source: /google/bigtable/(v.*)/.*-py/(.*)
- dest: /owl-bot-staging/bigtable/$1/$2
-
-begin-after-commit-hash: a21f1091413a260393548c1b2ac44b7347923f08
-
+#
+# This directory contains handwritten code. It exists so that the
+# oneof_message module can be imported from the autogenerated types
+# directory without causing ImportErrors due to circular imports.
+# For other use cases, use the overlay submodule.
diff --git a/google/cloud/bigtable_admin_v2/utils/oneof_message.py b/google/cloud/bigtable_admin_v2/utils/oneof_message.py
new file mode 100644
index 000000000..e110d8fa6
--- /dev/null
+++ b/google/cloud/bigtable_admin_v2/utils/oneof_message.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+import collections.abc
+import proto
+
+
+class OneofMessage(proto.Message):
+ def _get_oneof_field_from_key(self, key):
+ """Given a field name, return the corresponding oneof associated with it. If it doesn't exist, return None."""
+
+ oneof_type = None
+
+ try:
+ oneof_type = self._meta.fields[key].oneof
+ except KeyError:
+ # Underscores may be appended to field names
+ # that collide with python or proto-plus keywords.
+ # In case a key only exists with a `_` suffix, coerce the key
+ # to include the `_` suffix. It's not possible to
+ # natively define the same field with a trailing underscore in protobuf.
+ # See related issue
+ # https://github.com/googleapis/python-api-core/issues/227
+ if f"{key}_" in self._meta.fields:
+ key = f"{key}_"
+ oneof_type = self._meta.fields[key].oneof
+
+ return oneof_type
+
+ def __init__(
+ self,
+ mapping=None,
+ *,
+ ignore_unknown_fields=False,
+ **kwargs,
+ ):
+ # We accept several things for `mapping`:
+ # * An instance of this class.
+ # * An instance of the underlying protobuf descriptor class.
+ # * A dict
+ # * Nothing (keyword arguments only).
+ #
+ #
+ # Check for oneofs collisions in the parameters provided. Extract a set of
+ # all fields that are set from the mappings + kwargs combined.
+ mapping_fields = set(kwargs.keys())
+
+ if mapping is None:
+ pass
+ elif isinstance(mapping, collections.abc.Mapping):
+ mapping_fields.update(mapping.keys())
+ elif isinstance(mapping, self._meta.pb):
+ mapping_fields.update(field.name for field, _ in mapping.ListFields())
+ elif isinstance(mapping, type(self)):
+ mapping_fields.update(field.name for field, _ in mapping._pb.ListFields())
+ else:
+ # Sanity check: Did we get something not a map? Error if so.
+ raise TypeError(
+ "Invalid constructor input for %s: %r"
+ % (
+ self.__class__.__name__,
+ mapping,
+ )
+ )
+
+ oneofs = set()
+
+ for field in mapping_fields:
+ oneof_field = self._get_oneof_field_from_key(field)
+ if oneof_field is not None:
+ if oneof_field in oneofs:
+ raise ValueError(
+ "Invalid constructor input for %s: Multiple fields defined for oneof %s"
+ % (self.__class__.__name__, oneof_field)
+ )
+ else:
+ oneofs.add(oneof_field)
+
+ super().__init__(mapping, ignore_unknown_fields=ignore_unknown_fields, **kwargs)
+
+ def __setattr__(self, key, value):
+ # Oneof check: Only set the value of an existing oneof field
+ # if the field being overridden is the same as the field already set
+ # for the oneof.
+ oneof = self._get_oneof_field_from_key(key)
+ if (
+ oneof is not None
+ and self._pb.HasField(oneof)
+ and self._pb.WhichOneof(oneof) != key
+ ):
+ raise ValueError(
+ "Overriding the field set for oneof %s with a different field %s"
+ % (oneof, key)
+ )
+ super().__setattr__(key, value)
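In practice, deriving GcRule from OneofMessage (see the table.py hunk above) makes oneof misuse fail eagerly instead of silently keeping only the last field written. A sketch of the expected behavior, assuming GcRule's fields all belong to its ``rule`` oneof::

    from google.protobuf import duration_pb2
    from google.cloud.bigtable_admin_v2 import types

    rule = types.GcRule(max_num_versions=2)   # one field of the "rule" oneof: allowed
    rule.max_num_versions = 3                 # re-assigning the same oneof field: allowed

    try:
        rule.max_age = duration_pb2.Duration(seconds=86400)
    except ValueError:
        pass  # __setattr__ rejects switching the oneof to a different field

    try:
        types.GcRule(max_num_versions=2, max_age=duration_pb2.Duration(seconds=86400))
    except ValueError:
        pass  # __init__ rejects two fields of the same oneof in one constructor call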
diff --git a/google/cloud/bigtable_v2/__init__.py b/google/cloud/bigtable_v2/__init__.py
index 3cb3d4de0..ec552a85d 100644
--- a/google/cloud/bigtable_v2/__init__.py
+++ b/google/cloud/bigtable_v2/__init__.py
@@ -15,8 +15,18 @@
#
from google.cloud.bigtable_v2 import gapic_version as package_version
+import google.api_core as api_core
+import sys
+
__version__ = package_version.__version__
+if sys.version_info >= (3, 8): # pragma: NO COVER
+ from importlib import metadata
+else: # pragma: NO COVER
+ # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove
+ # this code path once we drop support for Python 3.7
+ import importlib_metadata as metadata
+
from .services.bigtable import BigtableClient
from .services.bigtable import BigtableAsyncClient
@@ -50,6 +60,7 @@
from .types.data import ColumnMetadata
from .types.data import ColumnRange
from .types.data import Family
+from .types.data import Idempotency
from .types.data import Mutation
from .types.data import PartialResultSet
from .types.data import ProtoFormat
@@ -69,6 +80,7 @@
from .types.data import Value
from .types.data import ValueRange
from .types.feature_flags import FeatureFlags
+from .types.peer_info import PeerInfo
from .types.request_stats import FullReadStatsView
from .types.request_stats import ReadIterationStats
from .types.request_stats import RequestLatencyStats
@@ -76,6 +88,100 @@
from .types.response_params import ResponseParams
from .types.types import Type
+if hasattr(api_core, "check_python_version") and hasattr(
+ api_core, "check_dependency_versions"
+): # pragma: NO COVER
+ api_core.check_python_version("google.cloud.bigtable_v2") # type: ignore
+ api_core.check_dependency_versions("google.cloud.bigtable_v2") # type: ignore
+else: # pragma: NO COVER
+ # An older version of api_core is installed which does not define the
+ # functions above. We do equivalent checks manually.
+ try:
+ import warnings
+ import sys
+
+ _py_version_str = sys.version.split()[0]
+ _package_label = "google.cloud.bigtable_v2"
+ if sys.version_info < (3, 9):
+ warnings.warn(
+ "You are using a non-supported Python version "
+ + f"({_py_version_str}). Google will not post any further "
+ + f"updates to {_package_label} supporting this Python version. "
+ + "Please upgrade to the latest Python version, or at "
+ + f"least to Python 3.9, and then update {_package_label}.",
+ FutureWarning,
+ )
+ if sys.version_info[:2] == (3, 9):
+ warnings.warn(
+ f"You are using a Python version ({_py_version_str}) "
+ + f"which Google will stop supporting in {_package_label} in "
+ + "January 2026. Please "
+ + "upgrade to the latest Python version, or at "
+ + "least to Python 3.10, before then, and "
+ + f"then update {_package_label}.",
+ FutureWarning,
+ )
+
+ def parse_version_to_tuple(version_string: str):
+ """Safely converts a semantic version string to a comparable tuple of integers.
+ Example: "4.25.8" -> (4, 25, 8)
+ Ignores non-numeric parts and handles common version formats.
+ Args:
+ version_string: Version string in the format "x.y.z".
+ Returns:
+ Tuple of integers for the parsed version string.
+ """
+ parts = []
+ for part in version_string.split("."):
+ try:
+ parts.append(int(part))
+ except ValueError:
+ # If it's a non-numeric part (e.g. the '0b1' in '1.0.0b1'), stop here.
+ # This is a simplification compared to 'packaging.parse_version', but sufficient
+ # for comparing strictly numeric semantic versions.
+ break
+ return tuple(parts)
+
+ def _get_version(dependency_name):
+ try:
+ version_string: str = metadata.version(dependency_name)
+ parsed_version = parse_version_to_tuple(version_string)
+ return (parsed_version, version_string)
+ except Exception:
+ # Catch exceptions from metadata.version() (e.g., PackageNotFoundError)
+ # or errors during parse_version_to_tuple
+ return (None, "--")
+
+ _dependency_package = "google.protobuf"
+ _next_supported_version = "4.25.8"
+ _next_supported_version_tuple = (4, 25, 8)
+ _recommendation = " (we recommend 6.x)"
+ (_version_used, _version_used_string) = _get_version(_dependency_package)
+ if _version_used and _version_used < _next_supported_version_tuple:
+ warnings.warn(
+ f"Package {_package_label} depends on "
+ + f"{_dependency_package}, currently installed at version "
+ + f"{_version_used_string}. Future updates to "
+ + f"{_package_label} will require {_dependency_package} at "
+ + f"version {_next_supported_version} or higher{_recommendation}."
+ + " Please ensure "
+ + "that either (a) your Python environment doesn't pin the "
+ + f"version of {_dependency_package}, so that updates to "
+ + f"{_package_label} can require the higher version, or "
+ + "(b) you manually update your Python environment to use at "
+ + f"least version {_next_supported_version} of "
+ + f"{_dependency_package}.",
+ FutureWarning,
+ )
+ except Exception:
+ warnings.warn(
+ "Could not determine the version of Python "
+ + "currently being used. To continue receiving "
+ + "updates for {_package_label}, ensure you are "
+ + "using a supported version of Python; see "
+ + "https://devguide.python.org/versions/"
+ )
+
__all__ = (
"BigtableAsyncClient",
"ArrayValue",
@@ -93,12 +199,14 @@
"FullReadStatsView",
"GenerateInitialChangeStreamPartitionsRequest",
"GenerateInitialChangeStreamPartitionsResponse",
+ "Idempotency",
"MutateRowRequest",
"MutateRowResponse",
"MutateRowsRequest",
"MutateRowsResponse",
"Mutation",
"PartialResultSet",
+ "PeerInfo",
"PingAndWarmRequest",
"PingAndWarmResponse",
"PrepareQueryRequest",
diff --git a/google/cloud/bigtable_v2/gapic_version.py b/google/cloud/bigtable_v2/gapic_version.py
index 8ab09c42e..6d72a226d 100644
--- a/google/cloud/bigtable_v2/gapic_version.py
+++ b/google/cloud/bigtable_v2/gapic_version.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.31.0" # {x-release-please-version}
+__version__ = "2.35.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py
index 123c340fa..0a9442287 100644
--- a/google/cloud/bigtable_v2/services/bigtable/async_client.py
+++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py
@@ -391,13 +391,18 @@ def read_rows(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
+
+ routing_param_regex = re.compile(
+ "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$"
+ )
+ regex_match = routing_param_regex.match(request.materialized_view_name)
+ if regex_match and regex_match.group("name"):
+ header_params["name"] = regex_match.group("name")
if header_params:
metadata = tuple(metadata) + (
@@ -515,13 +520,18 @@ def sample_row_keys(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
+
+ routing_param_regex = re.compile(
+ "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$"
+ )
+ regex_match = routing_param_regex.match(request.materialized_view_name)
+ if regex_match and regex_match.group("name"):
+ header_params["name"] = regex_match.group("name")
if header_params:
metadata = tuple(metadata) + (
@@ -660,13 +670,11 @@ async def mutate_row(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
if header_params:
metadata = tuple(metadata) + (
@@ -799,13 +807,11 @@ def mutate_rows(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
if header_params:
metadata = tuple(metadata) + (
@@ -979,13 +985,11 @@ async def check_and_mutate_row(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
if header_params:
metadata = tuple(metadata) + (
@@ -1164,7 +1168,9 @@ async def read_modify_write_row(
transformed into writes. Entries are
applied in order, meaning that earlier
rules will affect the results of later
- ones.
+ ones. At least one entry must be
+ specified, and there can be at most
+ 100000 rules.
This corresponds to the ``rules`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -1240,13 +1246,11 @@ async def read_modify_write_row(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
if header_params:
metadata = tuple(metadata) + (
@@ -1281,10 +1285,11 @@ def generate_initial_change_stream_partitions(
) -> Awaitable[
AsyncIterable[bigtable.GenerateInitialChangeStreamPartitionsResponse]
]:
- r"""NOTE: This API is intended to be used by Apache Beam BigtableIO.
- Returns the current list of partitions that make up the table's
+ r"""Returns the current list of partitions that make up the table's
change stream. The union of partitions will cover the entire
keyspace. Partitions can be read with ``ReadChangeStream``.
+ NOTE: This API is only intended to be used by Apache Beam
+ BigtableIO.
Args:
request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]):
@@ -1392,10 +1397,11 @@ def read_change_stream(
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]:
- r"""NOTE: This API is intended to be used by Apache Beam
- BigtableIO. Reads changes from a table's change stream.
- Changes will reflect both user-initiated mutations and
- mutations that are caused by garbage collection.
+ r"""Reads changes from a table's change stream. Changes
+ will reflect both user-initiated mutations and mutations
+ that are caused by garbage collection.
+ NOTE: This API is only intended to be used by Apache
+ Beam BigtableIO.
Args:
request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]):
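The routing-header change above is the same in every data-plane method: a request that names an AuthorizedView (or MaterializedView) is now routed by its parent table (or instance) rather than by the full view name. A small standalone sketch of the extraction, mirroring the generated code (not a public API)::

    import re

    authorized_view = (
        "projects/my-project/instances/my-instance/tables/my-table/authorizedViews/my-view"
    )
    table_regex = re.compile(
        "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
    )
    assert table_regex.match(authorized_view).group("table_name") == (
        "projects/my-project/instances/my-instance/tables/my-table"
    )

    materialized_view = "projects/my-project/instances/my-instance/materializedViews/my-view"
    instance_regex = re.compile("^(?P<name>projects/[^/]+/instances/[^/]+)(?:/.*)?$")
    assert instance_regex.match(materialized_view).group("name") == (
        "projects/my-project/instances/my-instance"
    )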
diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py
index 902e435c5..5eb6ba894 100644
--- a/google/cloud/bigtable_v2/services/bigtable/client.py
+++ b/google/cloud/bigtable_v2/services/bigtable/client.py
@@ -151,6 +151,34 @@ def _get_default_mtls_endpoint(api_endpoint):
_DEFAULT_ENDPOINT_TEMPLATE = "bigtable.{UNIVERSE_DOMAIN}"
_DEFAULT_UNIVERSE = "googleapis.com"
+ @staticmethod
+ def _use_client_cert_effective():
+ """Returns whether client certificate should be used for mTLS if the
+ google-auth version supports should_use_client_cert automatic mTLS enablement.
+
+ Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var.
+
+ Returns:
+ bool: whether client certificate should be used for mTLS
+ Raises:
+ ValueError: If using a version of google-auth without should_use_client_cert and
+ GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.
+ """
+ # check if google-auth version supports should_use_client_cert for automatic mTLS enablement
+ if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER
+ return mtls.should_use_client_cert()
+ else: # pragma: NO COVER
+ # if unsupported, fallback to reading from env var
+ use_client_cert_str = os.getenv(
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
+ ).lower()
+ if use_client_cert_str not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
+ " either `true` or `false`"
+ )
+ return use_client_cert_str == "true"
+
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
@@ -401,12 +429,8 @@ def get_mtls_endpoint_and_cert_source(
)
if client_options is None:
client_options = client_options_lib.ClientOptions()
- use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_client_cert = BigtableClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
@@ -414,7 +438,7 @@ def get_mtls_endpoint_and_cert_source(
# Figure out the client cert source to use.
client_cert_source = None
- if use_client_cert == "true":
+ if use_client_cert:
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
@@ -446,20 +470,14 @@ def _read_environment_variables():
google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
is not any of ["auto", "never", "always"].
"""
- use_client_cert = os.getenv(
- "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
- ).lower()
+ use_client_cert = BigtableClient._use_client_cert_effective()
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
- if use_client_cert not in ("true", "false"):
- raise ValueError(
- "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
+ return use_client_cert, use_mtls_endpoint, universe_domain_env
@staticmethod
def _get_client_cert_source(provided_cert_source, use_cert_flag):
@@ -867,13 +885,18 @@ def read_rows(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
+
+ routing_param_regex = re.compile(
+ "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$"
+ )
+ regex_match = routing_param_regex.match(request.materialized_view_name)
+ if regex_match and regex_match.group("name"):
+ header_params["name"] = regex_match.group("name")
if header_params:
metadata = tuple(metadata) + (
@@ -988,13 +1011,18 @@ def sample_row_keys(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
+
+ routing_param_regex = re.compile(
+ "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$"
+ )
+ regex_match = routing_param_regex.match(request.materialized_view_name)
+ if regex_match and regex_match.group("name"):
+ header_params["name"] = regex_match.group("name")
if header_params:
metadata = tuple(metadata) + (
@@ -1130,13 +1158,11 @@ def mutate_row(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
if header_params:
metadata = tuple(metadata) + (
@@ -1266,13 +1292,11 @@ def mutate_rows(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
if header_params:
metadata = tuple(metadata) + (
@@ -1443,13 +1467,11 @@ def check_and_mutate_row(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
if header_params:
metadata = tuple(metadata) + (
@@ -1625,7 +1647,9 @@ def read_modify_write_row(
transformed into writes. Entries are
applied in order, meaning that earlier
rules will affect the results of later
- ones.
+ ones. At least one entry must be
+ specified, and there can be at most
+ 100000 rules.
This corresponds to the ``rules`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -1698,13 +1722,11 @@ def read_modify_write_row(
header_params["app_profile_id"] = request.app_profile_id
routing_param_regex = re.compile(
- "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$"
+ "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$"
)
regex_match = routing_param_regex.match(request.authorized_view_name)
- if regex_match and regex_match.group("authorized_view_name"):
- header_params["authorized_view_name"] = regex_match.group(
- "authorized_view_name"
- )
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
if header_params:
metadata = tuple(metadata) + (
@@ -1737,10 +1759,11 @@ def generate_initial_change_stream_partitions(
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> Iterable[bigtable.GenerateInitialChangeStreamPartitionsResponse]:
- r"""NOTE: This API is intended to be used by Apache Beam BigtableIO.
- Returns the current list of partitions that make up the table's
+ r"""Returns the current list of partitions that make up the table's
change stream. The union of partitions will cover the entire
keyspace. Partitions can be read with ``ReadChangeStream``.
+ NOTE: This API is only intended to be used by Apache Beam
+ BigtableIO.
Args:
request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]):
@@ -1847,10 +1870,11 @@ def read_change_stream(
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> Iterable[bigtable.ReadChangeStreamResponse]:
- r"""NOTE: This API is intended to be used by Apache Beam
- BigtableIO. Reads changes from a table's change stream.
- Changes will reflect both user-initiated mutations and
- mutations that are caused by garbage collection.
+ r"""Reads changes from a table's change stream. Changes
+ will reflect both user-initiated mutations and mutations
+ that are caused by garbage collection.
+ NOTE: This API is only intended to be used by Apache
+ Beam BigtableIO.
Args:
request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]):
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/google/cloud/bigtable_v2/services/bigtable/transports/base.py
index 4d25d8b30..f08bca73e 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/base.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/base.py
@@ -74,9 +74,10 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
index a3c0865f1..8ddbf15a2 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
@@ -152,9 +152,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if a ``channel`` instance is provided.
channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
@@ -287,9 +288,10 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
+ This argument is mutually exclusive with credentials. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -538,10 +540,11 @@ def generate_initial_change_stream_partitions(
r"""Return a callable for the generate initial change stream
partitions method over gRPC.
- NOTE: This API is intended to be used by Apache Beam BigtableIO.
Returns the current list of partitions that make up the table's
change stream. The union of partitions will cover the entire
keyspace. Partitions can be read with ``ReadChangeStream``.
+ NOTE: This API is only intended to be used by Apache Beam
+ BigtableIO.
Returns:
Callable[[~.GenerateInitialChangeStreamPartitionsRequest],
@@ -571,10 +574,11 @@ def read_change_stream(
]:
r"""Return a callable for the read change stream method over gRPC.
- NOTE: This API is intended to be used by Apache Beam
- BigtableIO. Reads changes from a table's change stream.
- Changes will reflect both user-initiated mutations and
- mutations that are caused by garbage collection.
+ Reads changes from a table's change stream. Changes
+ will reflect both user-initiated mutations and mutations
+ that are caused by garbage collection.
+ NOTE: This API is only intended to be used by Apache
+ Beam BigtableIO.
Returns:
Callable[[~.ReadChangeStreamRequest],
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
index cebee0208..3e6b70832 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
@@ -149,8 +149,9 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be
+ removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -201,9 +202,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if a ``channel`` instance is provided.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if a ``channel`` instance is provided.
+ This argument will be removed in the next major version of this library.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
@@ -552,10 +554,11 @@ def generate_initial_change_stream_partitions(
r"""Return a callable for the generate initial change stream
partitions method over gRPC.
- NOTE: This API is intended to be used by Apache Beam BigtableIO.
Returns the current list of partitions that make up the table's
change stream. The union of partitions will cover the entire
keyspace. Partitions can be read with ``ReadChangeStream``.
+ NOTE: This API is only intended to be used by Apache Beam
+ BigtableIO.
Returns:
Callable[[~.GenerateInitialChangeStreamPartitionsRequest],
@@ -585,10 +588,11 @@ def read_change_stream(
]:
r"""Return a callable for the read change stream method over gRPC.
- NOTE: This API is intended to be used by Apache Beam
- BigtableIO. Reads changes from a table's change stream.
- Changes will reflect both user-initiated mutations and
- mutations that are caused by garbage collection.
+ Reads changes from a table's change stream. Changes
+ will reflect both user-initiated mutations and mutations
+ that are caused by garbage collection.
+ NOTE: This API is only intended to be used by Apache
+ Beam BigtableIO.
Returns:
Callable[[~.ReadChangeStreamRequest],
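With ``credentials_file`` deprecated on these transports and slated for removal in the next major version, callers can load the credentials themselves and pass the resulting object instead. A minimal sketch of that migration, assuming a service-account key on disk (the path and scope below are placeholders):

    import google.auth
    from google.cloud import bigtable_v2

    # Load credentials explicitly instead of relying on credentials_file.
    credentials, _ = google.auth.load_credentials_from_file(
        "/path/to/service-account.json",  # placeholder path
        scopes=["https://www.googleapis.com/auth/bigtable.data"],
    )

    # Pass the credentials object directly; no credentials_file needed.
    client = bigtable_v2.BigtableAsyncClient(credentials=credentials)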
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py
index c84ef147f..f0a761a36 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py
@@ -750,9 +750,10 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
+ This argument is ignored if ``channel`` is provided. This argument will be
+ removed in the next major version of this library.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
@@ -1080,6 +1081,22 @@ def __call__(
resp, _ = self._interceptor.post_execute_query_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.execute_query",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "ExecuteQuery",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
class _GenerateInitialChangeStreamPartitions(
@@ -1228,6 +1245,22 @@ def __call__(
) = self._interceptor.post_generate_initial_change_stream_partitions_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.generate_initial_change_stream_partitions",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "GenerateInitialChangeStreamPartitions",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
class _MutateRow(_BaseBigtableRestTransport._BaseMutateRow, BigtableRestStub):
@@ -1515,6 +1548,22 @@ def __call__(
resp, _ = self._interceptor.post_mutate_rows_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.mutate_rows",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "MutateRows",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
class _PingAndWarm(_BaseBigtableRestTransport._BasePingAndWarm, BigtableRestStub):
@@ -1966,6 +2015,22 @@ def __call__(
resp, _ = self._interceptor.post_read_change_stream_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.read_change_stream",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "ReadChangeStream",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
class _ReadModifyWriteRow(
@@ -2253,6 +2318,22 @@ def __call__(
resp, _ = self._interceptor.post_read_rows_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.read_rows",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "ReadRows",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
class _SampleRowKeys(
@@ -2383,6 +2464,22 @@ def __call__(
resp, _ = self._interceptor.post_sample_row_keys_with_metadata(
resp, response_metadata
)
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
+ logging.DEBUG
+ ): # pragma: NO COVER
+ http_response = {
+ "headers": dict(response.headers),
+ "status": response.status_code,
+ }
+ _LOGGER.debug(
+ "Received response for google.bigtable_v2.BigtableClient.sample_row_keys",
+ extra={
+ "serviceName": "google.bigtable.v2.Bigtable",
+ "rpcName": "SampleRowKeys",
+ "metadata": http_response["headers"],
+ "httpResponse": http_response,
+ },
+ )
return resp
@property
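The response-logging blocks added to the REST transport only fire when client debug logging is enabled. A sketch of turning it on with the standard ``logging`` module; the logger name is an assumption, and any ancestor of the transport module's logger should work:

    import logging

    # Surface the "Received response for ..." records added above.
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger("google.cloud.bigtable_v2").setLevel(logging.DEBUG)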
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py
index b2080f4a4..5eab0ded4 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py
@@ -641,6 +641,11 @@ def _get_http_options():
"uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows",
"body": "*",
},
+ {
+ "method": "post",
+ "uri": "/v2/{materialized_view_name=projects/*/instances/*/materializedViews/*}:readRows",
+ "body": "*",
+ },
]
return http_options
@@ -686,6 +691,10 @@ def _get_http_options():
"method": "get",
"uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys",
},
+ {
+ "method": "get",
+ "uri": "/v2/{materialized_view_name=projects/*/instances/*/materializedViews/*}:sampleRowKeys",
+ },
]
return http_options
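The new bindings above let ReadRows and SampleRowKeys target a materialized view directly. A rough sketch of how the added readRows template expands for such a resource (project, instance, and view names are placeholders):

    materialized_view_name = (
        "projects/my-project/instances/my-instance/materializedViews/my-view"
    )
    # Expansion of the URI template added above:
    uri = "/v2/{}:readRows".format(materialized_view_name)
    # -> "/v2/projects/my-project/instances/my-instance/materializedViews/my-view:readRows"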
diff --git a/google/cloud/bigtable_v2/types/__init__.py b/google/cloud/bigtable_v2/types/__init__.py
index 629dd6c90..b13c076a2 100644
--- a/google/cloud/bigtable_v2/types/__init__.py
+++ b/google/cloud/bigtable_v2/types/__init__.py
@@ -45,6 +45,7 @@
ColumnMetadata,
ColumnRange,
Family,
+ Idempotency,
Mutation,
PartialResultSet,
ProtoFormat,
@@ -67,6 +68,9 @@
from .feature_flags import (
FeatureFlags,
)
+from .peer_info import (
+ PeerInfo,
+)
from .request_stats import (
FullReadStatsView,
ReadIterationStats,
@@ -110,6 +114,7 @@
"ColumnMetadata",
"ColumnRange",
"Family",
+ "Idempotency",
"Mutation",
"PartialResultSet",
"ProtoFormat",
@@ -129,6 +134,7 @@
"Value",
"ValueRange",
"FeatureFlags",
+ "PeerInfo",
"FullReadStatsView",
"ReadIterationStats",
"RequestLatencyStats",
diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py
index f941c867a..19abba67b 100644
--- a/google/cloud/bigtable_v2/types/bigtable.py
+++ b/google/cloud/bigtable_v2/types/bigtable.py
@@ -197,27 +197,12 @@ class ReadRowsResponse(proto.Message):
row key, allowing the client to skip that work
on a retry.
request_stats (google.cloud.bigtable_v2.types.RequestStats):
- If requested, provide enhanced query performance statistics.
- The semantics dictate:
-
- - request_stats is empty on every (streamed) response,
- except
- - request_stats has non-empty information after all chunks
- have been streamed, where the ReadRowsResponse message
- only contains request_stats.
-
- - For example, if a read request would have returned an
- empty response instead a single ReadRowsResponse is
- streamed with empty chunks and request_stats filled.
-
- Visually, response messages will stream as follows: ... ->
- {chunks: [...]} -> {chunks: [], request_stats: {...}}
- \_\ **/ \_**\ \__________/ Primary response Trailer of
- RequestStats info
-
- Or if the read did not return any values: {chunks: [],
- request_stats: {...}} \________________________________/
- Trailer of RequestStats info
+ If requested, return enhanced query performance statistics.
+ The request_stats field is empty on every streamed response
+ except the last message of the stream, which contains only
+ request_stats. It is always returned when requested, even
+ when the read request returns an empty response.
"""
class CellChunk(proto.Message):
@@ -457,6 +442,10 @@ class MutateRowRequest(proto.Message):
meaning that earlier mutations can be masked by
later ones. Must contain at least one entry and
at most 100000.
+ idempotency (google.cloud.bigtable_v2.types.Idempotency):
+ If set consistently across retries, prevents
+ this mutation from being double applied to
+ aggregate column families within a 15m window.
"""
table_name: str = proto.Field(
@@ -480,6 +469,11 @@ class MutateRowRequest(proto.Message):
number=3,
message=data.Mutation,
)
+ idempotency: data.Idempotency = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=data.Idempotency,
+ )
class MutateRowResponse(proto.Message):
@@ -529,6 +523,10 @@ class Entry(proto.Message):
order, meaning that earlier mutations can be
masked by later ones. You must specify at least
one mutation.
+ idempotency (google.cloud.bigtable_v2.types.Idempotency):
+ If set consistently across retries, prevents
+ this mutation from being double applied to
+ aggregate column families within a 15m window.
"""
row_key: bytes = proto.Field(
@@ -540,6 +538,11 @@ class Entry(proto.Message):
number=2,
message=data.Mutation,
)
+ idempotency: data.Idempotency = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=data.Idempotency,
+ )
table_name: str = proto.Field(
proto.STRING,
@@ -640,8 +643,8 @@ class RateLimitInfo(proto.Message):
``factor`` until another ``period`` has passed.
The client can measure its load using any unit that's
- comparable over time For example, QPS can be used as long as
- each request involves a similar amount of work.
+ comparable over time. For example, QPS can be used as long
+ as each request involves a similar amount of work.
"""
period: duration_pb2.Duration = proto.Field(
@@ -807,7 +810,9 @@ class ReadModifyWriteRowRequest(proto.Message):
row's contents are to be transformed into
writes. Entries are applied in order, meaning
that earlier rules will affect the results of
- later ones.
+ later ones. At least one entry must be
+ specified, and there can be at most 100000
+ rules.
"""
table_name: str = proto.Field(
@@ -935,10 +940,10 @@ class ReadChangeStreamRequest(proto.Message):
the stream as part of ``Heartbeat`` and ``CloseStream``
messages.
- If a single token is provided, the token’s partition must
- exactly match the request’s partition. If multiple tokens
+ If a single token is provided, the token's partition must
+ exactly match the request's partition. If multiple tokens
are provided, as in the case of a partition merge, the union
- of the token partitions must exactly cover the request’s
+ of the token partitions must exactly cover the request's
partition. Otherwise, INVALID_ARGUMENT will be returned.
This field is a member of `oneof`_ ``start_from``.
@@ -1119,7 +1124,7 @@ class DataChange(proto.Message):
a record that will be delivered in the future on
the stream. It is possible that, under
particular circumstances that a future record
- has a timestamp is is lower than a previously
+ has a timestamp that is lower than a previously
seen timestamp. For an example usage see
https://beam.apache.org/documentation/basics/#watermarks
"""
@@ -1203,7 +1208,7 @@ class Heartbeat(proto.Message):
a record that will be delivered in the future on
the stream. It is possible that, under
particular circumstances that a future record
- has a timestamp is is lower than a previously
+ has a timestamp that is lower than a previously
seen timestamp. For an example usage see
https://beam.apache.org/documentation/basics/#watermarks
"""
@@ -1226,12 +1231,25 @@ class CloseStream(proto.Message):
if there was an ``end_time`` specified). If ``continuation_tokens``
& ``new_partitions`` are present, then a change in partitioning
requires the client to open a new stream for each token to resume
- reading. Example: [B, D) ends \| v new_partitions: [A, C) [C, E)
- continuation_tokens.partitions: [B,C) [C,D) ^---^ ^---^ ^ ^ \| \| \|
- StreamContinuationToken 2 \| StreamContinuationToken 1 To read the
- new partition [A,C), supply the continuation tokens whose ranges
- cover the new partition, for example ContinuationToken[A,B) &
- ContinuationToken[B,C).
+ reading. Example:
+
+ ::
+
+ [B, D) ends
+ |
+ v
+ new_partitions: [A, C) [C, E)
+ continuation_tokens.partitions: [B,C) [C,D)
+ ^---^ ^---^
+ ^ ^
+ | |
+ | StreamContinuationToken 2
+ |
+ StreamContinuationToken 1
+
+ To read the new partition [A,C), supply the continuation tokens
+ whose ranges cover the new partition, for example
+ ContinuationToken[A,B) & ContinuationToken[B,C).
Attributes:
status (google.rpc.status_pb2.Status):
@@ -1312,10 +1330,10 @@ class ExecuteQueryRequest(proto.Message):
Setting this field also places restrictions on several other
fields:
- - ``data_format`` must be empty.
- - ``validate_only`` must be false.
- - ``params`` must match the ``param_types`` set in the
- ``PrepareQueryRequest``.
+ - ``data_format`` must be empty.
+ - ``validate_only`` must be false.
+ - ``params`` must match the ``param_types`` set in the
+ ``PrepareQueryRequest``.
proto_format (google.cloud.bigtable_v2.types.ProtoFormat):
Protocol buffer format as described by
ProtoSchema and ProtoRows messages.
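A sketch of populating the new ``idempotency`` field so retried attempts of the same mutation are deduplicated; the table name and row key are placeholders, and a real request must also carry at least one mutation targeting an aggregate column family:

    import os

    from google.protobuf import timestamp_pb2
    from google.cloud import bigtable_v2

    # Timestamp of the first attempt; reuse it verbatim on every retry.
    start_time = timestamp_pb2.Timestamp()
    start_time.GetCurrentTime()

    request = bigtable_v2.MutateRowRequest(
        table_name="projects/p/instances/i/tables/t",  # placeholder
        row_key=b"row-key",
        mutations=[],  # add mutations for an aggregate column family here
        idempotency=bigtable_v2.Idempotency(
            token=os.urandom(16),  # must be at least 8 bytes, reused on retries
            start_time=start_time,
        ),
    )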
diff --git a/google/cloud/bigtable_v2/types/data.py b/google/cloud/bigtable_v2/types/data.py
index cecbc138a..12ac8b2b1 100644
--- a/google/cloud/bigtable_v2/types/data.py
+++ b/google/cloud/bigtable_v2/types/data.py
@@ -51,6 +51,7 @@
"ProtoRows",
"ProtoRowsBatch",
"PartialResultSet",
+ "Idempotency",
},
)
@@ -240,7 +241,8 @@ class Value(proto.Message):
This field is a member of `oneof`_ ``kind``.
float_value (float):
Represents a typed value transported as a
- floating point number.
+ floating point number. Does not support NaN or
+ infinities.
This field is a member of `oneof`_ ``kind``.
timestamp_value (google.protobuf.timestamp_pb2.Timestamp):
@@ -571,26 +573,26 @@ class RowFilter(proto.Message):
transformers), as well as two ways to compose simple filters into
more complex ones (chains and interleaves). They work as follows:
- - True filters alter the input row by excluding some of its cells
- wholesale from the output row. An example of a true filter is the
- ``value_regex_filter``, which excludes cells whose values don't
- match the specified pattern. All regex true filters use RE2
- syntax (https://github.com/google/re2/wiki/Syntax) in raw byte
- mode (RE2::Latin1), and are evaluated as full matches. An
- important point to keep in mind is that ``RE2(.)`` is equivalent
- by default to ``RE2([^\n])``, meaning that it does not match
- newlines. When attempting to match an arbitrary byte, you should
- therefore use the escape sequence ``\C``, which may need to be
- further escaped as ``\\C`` in your client language.
-
- - Transformers alter the input row by changing the values of some
- of its cells in the output, without excluding them completely.
- Currently, the only supported transformer is the
- ``strip_value_transformer``, which replaces every cell's value
- with the empty string.
-
- - Chains and interleaves are described in more detail in the
- RowFilter.Chain and RowFilter.Interleave documentation.
+ - True filters alter the input row by excluding some of its cells
+ wholesale from the output row. An example of a true filter is the
+ ``value_regex_filter``, which excludes cells whose values don't
+ match the specified pattern. All regex true filters use RE2 syntax
+ (https://github.com/google/re2/wiki/Syntax) in raw byte mode
+ (RE2::Latin1), and are evaluated as full matches. An important
+ point to keep in mind is that ``RE2(.)`` is equivalent by default
+ to ``RE2([^\n])``, meaning that it does not match newlines. When
+ attempting to match an arbitrary byte, you should therefore use
+ the escape sequence ``\C``, which may need to be further escaped
+ as ``\\C`` in your client language.
+
+ - Transformers alter the input row by changing the values of some of
+ its cells in the output, without excluding them completely.
+ Currently, the only supported transformer is the
+ ``strip_value_transformer``, which replaces every cell's value
+ with the empty string.
+
+ - Chains and interleaves are described in more detail in the
+ RowFilter.Chain and RowFilter.Interleave documentation.
The total serialized size of a RowFilter message must not exceed
20480 bytes, and RowFilters may not be nested within each other (in
@@ -1491,21 +1493,20 @@ class PartialResultSet(proto.Message):
Having:
- - queue of row results waiting to be returned ``queue``
- - extensible buffer of bytes ``buffer``
- - a place to keep track of the most recent ``resume_token`` for
- each PartialResultSet ``p`` received { if p.reset { ensure
- ``queue`` is empty ensure ``buffer`` is empty } if
- p.estimated_batch_size != 0 { (optional) ensure ``buffer`` is
- sized to at least ``p.estimated_batch_size`` } if
- ``p.proto_rows_batch`` is set { append
- ``p.proto_rows_batch.bytes`` to ``buffer`` } if p.batch_checksum
- is set and ``buffer`` is not empty { validate the checksum
- matches the contents of ``buffer`` (see comments on
- ``batch_checksum``) parse ``buffer`` as ``ProtoRows`` message,
- clearing ``buffer`` add parsed rows to end of ``queue`` } if
- p.resume_token is set { release results in ``queue`` save
- ``p.resume_token`` in ``resume_token`` } }
+ - queue of row results waiting to be returned ``queue``
+ - extensible buffer of bytes ``buffer``
+ - a place to keep track of the most recent ``resume_token`` for each
+ PartialResultSet ``p`` received { if p.reset { ensure ``queue`` is
+ empty ensure ``buffer`` is empty } if p.estimated_batch_size != 0
+ { (optional) ensure ``buffer`` is sized to at least
+ ``p.estimated_batch_size`` } if ``p.proto_rows_batch`` is set {
+ append ``p.proto_rows_batch.bytes`` to ``buffer`` } if
+ p.batch_checksum is set and ``buffer`` is not empty { validate the
+ checksum matches the contents of ``buffer`` (see comments on
+ ``batch_checksum``) parse ``buffer`` as ``ProtoRows`` message,
+ clearing ``buffer`` add parsed rows to end of ``queue`` } if
+ p.resume_token is set { release results in ``queue`` save
+ ``p.resume_token`` in ``resume_token`` } }
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
@@ -1609,4 +1610,36 @@ class PartialResultSet(proto.Message):
)
+class Idempotency(proto.Message):
+ r"""Parameters on mutations where clients want to ensure
+ idempotency (i.e. at-most-once semantics). This is currently
+ only needed for certain aggregate types.
+
+ Attributes:
+ token (bytes):
+ Unique token used to identify replays of this
+ mutation. Must be at least 8 bytes long.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ Client-assigned timestamp when the mutation's
+ first attempt was sent. Used to reject mutations
+ that arrive after idempotency protection may
+ have expired. May cause spurious rejections if
+ clock skew is too high.
+
+ Leave unset or zero to always accept the
+ mutation, at the risk of double counting if the
+ protection for previous attempts has expired.
+ """
+
+ token: bytes = proto.Field(
+ proto.BYTES,
+ number=1,
+ )
+ start_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
__all__ = tuple(sorted(__protobuf__.manifest))
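As a concrete illustration of the RE2 note in the RowFilter docstring above, matching a single arbitrary byte in a cell value needs the ``\C`` escape rather than ``.`` (a sketch using the untyped filter API):

    from google.cloud import bigtable_v2

    # Matches values of "v" followed by exactly one arbitrary byte,
    # including a newline, which "." would not match by default.
    row_filter = bigtable_v2.RowFilter(value_regex_filter=b"v\\C")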
diff --git a/google/cloud/bigtable_v2/types/feature_flags.py b/google/cloud/bigtable_v2/types/feature_flags.py
index 69cfe1cf4..2c8ea8732 100644
--- a/google/cloud/bigtable_v2/types/feature_flags.py
+++ b/google/cloud/bigtable_v2/types/feature_flags.py
@@ -76,6 +76,9 @@ class FeatureFlags(proto.Message):
direct_access_requested (bool):
Notify the server that the client explicitly
opted in for Direct Access.
+ peer_info (bool):
+ Whether the client supports using
+ BigtablePeerInfo.
"""
reverse_scans: bool = proto.Field(
@@ -114,6 +117,10 @@ class FeatureFlags(proto.Message):
proto.BOOL,
number=10,
)
+ peer_info: bool = proto.Field(
+ proto.BOOL,
+ number=11,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/bigtable_v2/types/peer_info.py b/google/cloud/bigtable_v2/types/peer_info.py
new file mode 100644
index 000000000..b3f1203cc
--- /dev/null
+++ b/google/cloud/bigtable_v2/types/peer_info.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.bigtable.v2",
+ manifest={
+ "PeerInfo",
+ },
+)
+
+
+class PeerInfo(proto.Message):
+ r"""PeerInfo contains information about the peer that the client
+ is connecting to.
+
+ Attributes:
+ google_frontend_id (int):
+ An opaque identifier for the Google Frontend
+ which serviced this request. Only set when not
+ using DirectAccess.
+ application_frontend_id (int):
+ An opaque identifier for the application
+ frontend which serviced this request.
+ application_frontend_zone (str):
+ The Cloud zone of the application frontend
+ that served this request.
+ application_frontend_subzone (str):
+ The subzone of the application frontend that
+ served this request, e.g. an identifier for
+ where within the zone the application frontend
+ is located.
+ transport_type (google.cloud.bigtable_v2.types.PeerInfo.TransportType):
+ The transport type that the client used to
+ connect to this peer.
+ """
+
+ class TransportType(proto.Enum):
+ r"""The transport type that the client used to connect to this
+ peer.
+
+ Values:
+ TRANSPORT_TYPE_UNKNOWN (0):
+ The transport type is unknown.
+ TRANSPORT_TYPE_EXTERNAL (1):
+ The client connected to this peer via an
+ external network (e.g. outside Google Cloud).
+ TRANSPORT_TYPE_CLOUD_PATH (2):
+ The client connected to this peer via
+ CloudPath.
+ TRANSPORT_TYPE_DIRECT_ACCESS (3):
+ The client connected to this peer via
+ DirectAccess.
+ TRANSPORT_TYPE_SESSION_UNKNOWN (4):
+ The client connected to this peer via
+ Bigtable Sessions using an unknown transport
+ type.
+ TRANSPORT_TYPE_SESSION_EXTERNAL (5):
+ The client connected to this peer via
+ Bigtable Sessions on an external network (e.g.
+ outside Google Cloud).
+ TRANSPORT_TYPE_SESSION_CLOUD_PATH (6):
+ The client connected to this peer via
+ Bigtable Sessions using CloudPath.
+ TRANSPORT_TYPE_SESSION_DIRECT_ACCESS (7):
+ The client connected to this peer via
+ Bigtable Sessions using DirectAccess.
+ """
+ TRANSPORT_TYPE_UNKNOWN = 0
+ TRANSPORT_TYPE_EXTERNAL = 1
+ TRANSPORT_TYPE_CLOUD_PATH = 2
+ TRANSPORT_TYPE_DIRECT_ACCESS = 3
+ TRANSPORT_TYPE_SESSION_UNKNOWN = 4
+ TRANSPORT_TYPE_SESSION_EXTERNAL = 5
+ TRANSPORT_TYPE_SESSION_CLOUD_PATH = 6
+ TRANSPORT_TYPE_SESSION_DIRECT_ACCESS = 7
+
+ google_frontend_id: int = proto.Field(
+ proto.INT64,
+ number=1,
+ )
+ application_frontend_id: int = proto.Field(
+ proto.INT64,
+ number=2,
+ )
+ application_frontend_zone: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ application_frontend_subzone: str = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+ transport_type: TransportType = proto.Field(
+ proto.ENUM,
+ number=5,
+ enum=TransportType,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
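A small sketch of inspecting a ``PeerInfo`` message; how the message is surfaced to callers is not part of this change, so the construction below is purely illustrative:

    from google.cloud.bigtable_v2.types import PeerInfo

    peer = PeerInfo(
        application_frontend_zone="us-central1-b",  # placeholder value
        transport_type=PeerInfo.TransportType.TRANSPORT_TYPE_DIRECT_ACCESS,
    )

    if peer.transport_type == PeerInfo.TransportType.TRANSPORT_TYPE_DIRECT_ACCESS:
        print("served over DirectAccess from", peer.application_frontend_zone)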
diff --git a/google/cloud/bigtable_v2/types/request_stats.py b/google/cloud/bigtable_v2/types/request_stats.py
index 8548996ef..540e6548d 100644
--- a/google/cloud/bigtable_v2/types/request_stats.py
+++ b/google/cloud/bigtable_v2/types/request_stats.py
@@ -142,11 +142,10 @@ class FullReadStatsView(proto.Message):
class RequestStats(proto.Message):
- r"""RequestStats is the container for additional information pertaining
- to a single request, helpful for evaluating the performance of the
- sent request. Currently, there are the following supported methods:
-
- - google.bigtable.v2.ReadRows
+ r"""RequestStats is the container for additional information
+ pertaining to a single request, helpful for evaluating the
+ performance of the sent request. Currently, the following method
+ is supported: google.bigtable.v2.ReadRows
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
diff --git a/google/cloud/bigtable_v2/types/response_params.py b/google/cloud/bigtable_v2/types/response_params.py
index 2c04dadaa..cc6384ab3 100644
--- a/google/cloud/bigtable_v2/types/response_params.py
+++ b/google/cloud/bigtable_v2/types/response_params.py
@@ -29,10 +29,7 @@
class ResponseParams(proto.Message):
- r"""Response metadata proto This is an experimental feature that will be
- used to get zone_id and cluster_id from response trailers to tag the
- metrics. This should not be used by customers directly
-
+ r"""Response metadata proto
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
@@ -47,6 +44,11 @@ class ResponseParams(proto.Message):
of bigtable resources.
This field is a member of `oneof`_ ``_cluster_id``.
+ afe_id (int):
+ The AFE ID for the AFE that served this
+ request.
+
+ This field is a member of `oneof`_ ``_afe_id``.
"""
zone_id: str = proto.Field(
@@ -59,6 +61,11 @@ class ResponseParams(proto.Message):
number=2,
optional=True,
)
+ afe_id: int = proto.Field(
+ proto.INT64,
+ number=3,
+ optional=True,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
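ResponseParams arrives as a serialized proto in a binary response trailer; a sketch of decoding it and reading the new ``afe_id`` field, where ``trailer_bytes`` stands in for whatever metadata value the transport hands back:

    from google.cloud.bigtable_v2.types import ResponseParams

    def log_response_params(trailer_bytes: bytes) -> None:
        params = ResponseParams.deserialize(trailer_bytes)
        # All fields are optional; check presence before reading them.
        if "afe_id" in params:
            print("served by AFE", params.afe_id)
        if "zone_id" in params and "cluster_id" in params:
            print("served from", params.zone_id, params.cluster_id)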
diff --git a/google/cloud/bigtable_v2/types/types.py b/google/cloud/bigtable_v2/types/types.py
index 7f92a15ae..0b4ddb57a 100644
--- a/google/cloud/bigtable_v2/types/types.py
+++ b/google/cloud/bigtable_v2/types/types.py
@@ -35,34 +35,27 @@ class Type(proto.Message):
features.
For compatibility with Bigtable's existing untyped APIs, each
- ``Type`` includes an ``Encoding`` which describes how to convert
- to/from the underlying data.
-
- Each encoding also defines the following properties:
-
- - Order-preserving: Does the encoded value sort consistently with
- the original typed value? Note that Bigtable will always sort
- data based on the raw encoded value, *not* the decoded type.
-
- - Example: BYTES values sort in the same order as their raw
- encodings.
- - Counterexample: Encoding INT64 as a fixed-width decimal string
- does *not* preserve sort order when dealing with negative
- numbers. ``INT64(1) > INT64(-1)``, but
- ``STRING("-00001") > STRING("00001)``.
-
- - Self-delimiting: If we concatenate two encoded values, can we
- always tell where the first one ends and the second one begins?
-
- - Example: If we encode INT64s to fixed-width STRINGs, the first
- value will always contain exactly N digits, possibly preceded
- by a sign.
- - Counterexample: If we concatenate two UTF-8 encoded STRINGs,
- we have no way to tell where the first one ends.
-
- - Compatibility: Which other systems have matching encoding
- schemes? For example, does this encoding have a GoogleSQL
- equivalent? HBase? Java?
+ ``Type`` includes an ``Encoding`` which describes how to convert to
+ or from the underlying data.
+
+ Each encoding can operate in one of two modes:
+
+ - Sorted: In this mode, Bigtable guarantees that
+ ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is
+ useful anywhere sort order is important, for example when encoding
+ keys.
+ - Distinct: In this mode, Bigtable guarantees that if ``X != Y``
+ then ``Encode(X) != Encode(Y)``. However, the converse is not
+ guaranteed. For example, both ``{'foo': '1', 'bar': '2'}`` and
+ ``{'bar': '2', 'foo': '1'}`` are valid encodings of the same JSON
+ value.
+
+ The API clearly documents which mode is used wherever an encoding
+ can be configured. Each encoding also documents which values are
+ supported in which modes. For example, when encoding INT64 as a
+ numeric STRING, negative numbers cannot be encoded in sorted mode.
+ This is because ``INT64(1) > INT64(-1)``, but
+ ``STRING("-00001") > STRING("00001")``.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
@@ -119,6 +112,14 @@ class Type(proto.Message):
map_type (google.cloud.bigtable_v2.types.Type.Map):
Map
+ This field is a member of `oneof`_ ``kind``.
+ proto_type (google.cloud.bigtable_v2.types.Type.Proto):
+ Proto
+
+ This field is a member of `oneof`_ ``kind``.
+ enum_type (google.cloud.bigtable_v2.types.Type.Enum):
+ Enum
+
This field is a member of `oneof`_ ``kind``.
"""
@@ -127,12 +128,12 @@ class Bytes(proto.Message):
Attributes:
encoding (google.cloud.bigtable_v2.types.Type.Bytes.Encoding):
- The encoding to use when converting to/from
- lower level types.
+ The encoding to use when converting to or
+ from lower level types.
"""
class Encoding(proto.Message):
- r"""Rules used to convert to/from lower level types.
+ r"""Rules used to convert to or from lower level types.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
@@ -144,14 +145,26 @@ class Encoding(proto.Message):
"""
class Raw(proto.Message):
- r"""Leaves the value "as-is"
+ r"""Leaves the value as-is.
- - Order-preserving? Yes
- - Self-delimiting? No
- - Compatibility? N/A
+ Sorted mode: all values are supported.
+ Distinct mode: all values are supported.
+
+ Attributes:
+ escape_nulls (bool):
+ If set, allows NULL values to be encoded as the empty string
+ "".
+
+ The actual empty string, or any value which only contains
+ the null byte ``0x00``, has one more null byte appended.
"""
+ escape_nulls: bool = proto.Field(
+ proto.BOOL,
+ number=1,
+ )
+
raw: "Type.Bytes.Encoding.Raw" = proto.Field(
proto.MESSAGE,
number=1,
@@ -171,12 +184,12 @@ class String(proto.Message):
Attributes:
encoding (google.cloud.bigtable_v2.types.Type.String.Encoding):
- The encoding to use when converting to/from
- lower level types.
+ The encoding to use when converting to or
+ from lower level types.
"""
class Encoding(proto.Message):
- r"""Rules used to convert to/from lower level types.
+ r"""Rules used to convert to or from lower level types.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
@@ -200,18 +213,45 @@ class Utf8Raw(proto.Message):
r"""Deprecated: prefer the equivalent ``Utf8Bytes``."""
class Utf8Bytes(proto.Message):
- r"""UTF-8 encoding
+ r"""UTF-8 encoding.
+
+ Sorted mode:
+
+ - All values are supported.
+ - Code point order is preserved.
+
+ Distinct mode: all values are supported.
- - Order-preserving? Yes (code point order)
- - Self-delimiting? No
- - Compatibility?
+ Compatible with:
- - BigQuery Federation ``TEXT`` encoding
- - HBase ``Bytes.toBytes``
- - Java ``String#getBytes(StandardCharsets.UTF_8)``
+ - BigQuery ``TEXT`` encoding
+ - HBase ``Bytes.toBytes``
+ - Java ``String#getBytes(StandardCharsets.UTF_8)``
+ Attributes:
+ null_escape_char (str):
+ Single-character escape sequence used to support NULL
+ values.
+
+ If set, allows NULL values to be encoded as the empty string
+ "".
+
+ The actual empty string, or any value where every character
+ equals ``null_escape_char``, has one more
+ ``null_escape_char`` appended.
+
+ If ``null_escape_char`` is set and does not equal the ASCII
+ null character ``0x00``, then the encoding will not support
+ sorted mode.
"""
+ null_escape_char: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
utf8_raw: "Type.String.Encoding.Utf8Raw" = proto.Field(
proto.MESSAGE,
number=1,
@@ -236,12 +276,17 @@ class Int64(proto.Message):
Attributes:
encoding (google.cloud.bigtable_v2.types.Type.Int64.Encoding):
- The encoding to use when converting to/from
- lower level types.
+ The encoding to use when converting to or
+ from lower level types.
"""
class Encoding(proto.Message):
- r"""Rules used to convert to/from lower level types.
+ r"""Rules used to convert to or from lower level types.
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
@@ -249,20 +294,25 @@ class Encoding(proto.Message):
big_endian_bytes (google.cloud.bigtable_v2.types.Type.Int64.Encoding.BigEndianBytes):
Use ``BigEndianBytes`` encoding.
+ This field is a member of `oneof`_ ``encoding``.
+ ordered_code_bytes (google.cloud.bigtable_v2.types.Type.Int64.Encoding.OrderedCodeBytes):
+ Use ``OrderedCodeBytes`` encoding.
+
This field is a member of `oneof`_ ``encoding``.
"""
class BigEndianBytes(proto.Message):
- r"""Encodes the value as an 8-byte big endian twos complement ``Bytes``
- value.
+ r"""Encodes the value as an 8-byte big-endian two's complement value.
- - Order-preserving? No (positive values only)
- - Self-delimiting? Yes
- - Compatibility?
+ Sorted mode: non-negative values are supported.
- - BigQuery Federation ``BINARY`` encoding
- - HBase ``Bytes.toBytes``
- - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN``
+ Distinct mode: all values are supported.
+
+ Compatible with:
+
+ - BigQuery ``BINARY`` encoding
+ - HBase ``Bytes.toBytes``
+ - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN``
Attributes:
bytes_type (google.cloud.bigtable_v2.types.Type.Bytes):
@@ -275,12 +325,28 @@ class BigEndianBytes(proto.Message):
message="Type.Bytes",
)
+ class OrderedCodeBytes(proto.Message):
+ r"""Encodes the value in a variable length binary format of up to
+ 10 bytes. Values that are closer to zero use fewer bytes.
+
+ Sorted mode: all values are supported.
+
+ Distinct mode: all values are supported.
+
+ """
+
big_endian_bytes: "Type.Int64.Encoding.BigEndianBytes" = proto.Field(
proto.MESSAGE,
number=1,
oneof="encoding",
message="Type.Int64.Encoding.BigEndianBytes",
)
+ ordered_code_bytes: "Type.Int64.Encoding.OrderedCodeBytes" = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="encoding",
+ message="Type.Int64.Encoding.OrderedCodeBytes",
+ )
encoding: "Type.Int64.Encoding" = proto.Field(
proto.MESSAGE,
@@ -307,8 +373,43 @@ class Timestamp(proto.Message):
r"""Timestamp Values of type ``Timestamp`` are stored in
``Value.timestamp_value``.
+ Attributes:
+ encoding (google.cloud.bigtable_v2.types.Type.Timestamp.Encoding):
+ The encoding to use when converting to or
+ from lower level types.
"""
+ class Encoding(proto.Message):
+ r"""Rules used to convert to or from lower level types.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ unix_micros_int64 (google.cloud.bigtable_v2.types.Type.Int64.Encoding):
+ Encodes the number of microseconds since the Unix epoch
+ using the given ``Int64`` encoding. Values must be
+ microsecond-aligned.
+
+ Compatible with:
+
+ - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS``
+
+ This field is a member of `oneof`_ ``encoding``.
+ """
+
+ unix_micros_int64: "Type.Int64.Encoding" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof="encoding",
+ message="Type.Int64.Encoding",
+ )
+
+ encoding: "Type.Timestamp.Encoding" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message="Type.Timestamp.Encoding",
+ )
+
class Date(proto.Message):
r"""Date Values of type ``Date`` are stored in ``Value.date_value``."""
@@ -322,6 +423,9 @@ class Struct(proto.Message):
fields (MutableSequence[google.cloud.bigtable_v2.types.Type.Struct.Field]):
The names and types of the fields in this
struct.
+ encoding (google.cloud.bigtable_v2.types.Type.Struct.Encoding):
+ The encoding to use when converting to or
+ from lower level types.
"""
class Field(proto.Message):
@@ -345,11 +449,192 @@ class Field(proto.Message):
message="Type",
)
+ class Encoding(proto.Message):
+ r"""Rules used to convert to or from lower level types.
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ singleton (google.cloud.bigtable_v2.types.Type.Struct.Encoding.Singleton):
+ Use ``Singleton`` encoding.
+
+ This field is a member of `oneof`_ ``encoding``.
+ delimited_bytes (google.cloud.bigtable_v2.types.Type.Struct.Encoding.DelimitedBytes):
+ Use ``DelimitedBytes`` encoding.
+
+ This field is a member of `oneof`_ ``encoding``.
+ ordered_code_bytes (google.cloud.bigtable_v2.types.Type.Struct.Encoding.OrderedCodeBytes):
+ Use ``OrderedCodeBytes`` encoding.
+
+ This field is a member of `oneof`_ ``encoding``.
+ """
+
+ class Singleton(proto.Message):
+ r"""Uses the encoding of ``fields[0].type`` as-is. Only valid if
+ ``fields.size == 1``.
+
+ """
+
+ class DelimitedBytes(proto.Message):
+ r"""Fields are encoded independently and concatenated with a
+ configurable ``delimiter`` in between.
+
+ A struct with no fields defined is encoded as a single
+ ``delimiter``.
+
+ Sorted mode:
+
+ - Fields are encoded in sorted mode.
+ - Encoded field values must not contain any bytes <=
+ ``delimiter[0]``
+ - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
+ if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
+ first.
+
+ Distinct mode:
+
+ - Fields are encoded in distinct mode.
+ - Encoded field values must not contain ``delimiter[0]``.
+
+ Attributes:
+ delimiter (bytes):
+ Byte sequence used to delimit concatenated
+ fields. The delimiter must contain at least 1
+ character and at most 50 characters.
+ """
+
+ delimiter: bytes = proto.Field(
+ proto.BYTES,
+ number=1,
+ )
+
+ class OrderedCodeBytes(proto.Message):
+ r"""Fields are encoded independently and concatenated with the fixed
+ byte pair ``{0x00, 0x01}`` in between.
+
+ Any null ``(0x00)`` byte in an encoded field is replaced by the
+ fixed byte pair ``{0x00, 0xFF}``.
+
+ Fields that encode to the empty string "" have special handling:
+
+ - If *every* field encodes to "", or if the STRUCT has no fields
+ defined, then the STRUCT is encoded as the fixed byte pair
+ ``{0x00, 0x00}``.
+ - Otherwise, the STRUCT only encodes until the last non-empty field,
+ omitting any trailing empty fields. Any empty fields that aren't
+ omitted are replaced with the fixed byte pair ``{0x00, 0x00}``.
+
+ Examples:
+
+ ::
+
+ - STRUCT() -> "\00\00"
+ - STRUCT("") -> "\00\00"
+ - STRUCT("", "") -> "\00\00"
+ - STRUCT("", "B") -> "\00\00" + "\00\01" + "B"
+ - STRUCT("A", "") -> "A"
+ - STRUCT("", "B", "") -> "\00\00" + "\00\01" + "B"
+ - STRUCT("A", "", "C") -> "A" + "\00\01" + "\00\00" + "\00\01" + "C"
+
+ Since null bytes are always escaped, this encoding can cause size
+ blowup for encodings like ``Int64.BigEndianBytes`` that are likely
+ to produce many such bytes.
+
+ Sorted mode:
+
+ - Fields are encoded in sorted mode.
+ - All values supported by the field encodings are allowed.
+ - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or
+ if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort
+ first.
+
+ Distinct mode:
+
+ - Fields are encoded in distinct mode.
+ - All values supported by the field encodings are allowed.
+
+ """
+
+ singleton: "Type.Struct.Encoding.Singleton" = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof="encoding",
+ message="Type.Struct.Encoding.Singleton",
+ )
+ delimited_bytes: "Type.Struct.Encoding.DelimitedBytes" = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="encoding",
+ message="Type.Struct.Encoding.DelimitedBytes",
+ )
+ ordered_code_bytes: "Type.Struct.Encoding.OrderedCodeBytes" = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="encoding",
+ message="Type.Struct.Encoding.OrderedCodeBytes",
+ )
+
fields: MutableSequence["Type.Struct.Field"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="Type.Struct.Field",
)
+ encoding: "Type.Struct.Encoding" = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message="Type.Struct.Encoding",
+ )
+
+ class Proto(proto.Message):
+ r"""A protobuf message type. Values of type ``Proto`` are stored in
+ ``Value.bytes_value``.
+
+ Attributes:
+ schema_bundle_id (str):
+ The ID of the schema bundle that this proto
+ is defined in.
+ message_name (str):
+ The fully qualified name of the protobuf
+ message, including package. In the format of
+ "foo.bar.Message".
+ """
+
+ schema_bundle_id: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ message_name: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+ class Enum(proto.Message):
+ r"""A protobuf enum type. Values of type ``Enum`` are stored in
+ ``Value.int_value``.
+
+ Attributes:
+ schema_bundle_id (str):
+ The ID of the schema bundle that this enum is
+ defined in.
+ enum_name (str):
+ The fully qualified name of the protobuf enum
+ message, including package. In the format of
+ "foo.bar.EnumMessage".
+ """
+
+ schema_bundle_id: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ enum_name: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
class Array(proto.Message):
r"""An ordered list of elements of a given type. Values of type
@@ -399,8 +684,8 @@ class Aggregate(proto.Message):
r"""A value that combines incremental updates into a summarized value.
Data is never directly written or read using type ``Aggregate``.
- Writes will provide either the ``input_type`` or ``state_type``, and
- reads will always return the ``state_type`` .
+ Writes provide either the ``input_type`` or ``state_type``, and
+ reads always return the ``state_type`` .
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
@@ -412,13 +697,12 @@ class Aggregate(proto.Message):
Attributes:
input_type (google.cloud.bigtable_v2.types.Type):
Type of the inputs that are accumulated by this
- ``Aggregate``, which must specify a full encoding. Use
- ``AddInput`` mutations to accumulate new inputs.
+ ``Aggregate``. Use ``AddInput`` mutations to accumulate new
+ inputs.
state_type (google.cloud.bigtable_v2.types.Type):
Output only. Type that holds the internal accumulator state
for the ``Aggregate``. This is a function of the
- ``input_type`` and ``aggregator`` chosen, and will always
- specify a full encoding.
+ ``input_type`` and ``aggregator`` chosen.
sum (google.cloud.bigtable_v2.types.Type.Aggregate.Sum):
Sum aggregator.
@@ -574,6 +858,18 @@ class HyperLogLogPlusPlusUniqueCount(proto.Message):
oneof="kind",
message=Map,
)
+ proto_type: Proto = proto.Field(
+ proto.MESSAGE,
+ number=13,
+ oneof="kind",
+ message=Proto,
+ )
+ enum_type: Enum = proto.Field(
+ proto.MESSAGE,
+ number=14,
+ oneof="kind",
+ message=Enum,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
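A sketch of wiring up the new encodings defined above: an INT64 using the variable-length ``OrderedCodeBytes`` encoding, reused by a TIMESTAMP stored as microseconds since the Unix epoch. The nested message and field names follow the definitions added in this change; importing ``Type`` from ``google.cloud.bigtable_v2.types`` is assumed to keep working as before:

    from google.cloud.bigtable_v2.types import Type

    # INT64 encoding that preserves sort order even for negative values.
    int64_encoding = Type.Int64.Encoding(
        ordered_code_bytes=Type.Int64.Encoding.OrderedCodeBytes()
    )

    # TIMESTAMP encoded as unix micros, reusing the Int64 encoding above.
    timestamp_type = Type.Timestamp(
        encoding=Type.Timestamp.Encoding(unix_micros_int64=int64_encoding)
    )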
diff --git a/mypy.ini b/mypy.ini
index 31cc24223..701b7587c 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -1,6 +1,9 @@
[mypy]
-python_version = 3.8
+python_version = 3.13
namespace_packages = True
+check_untyped_defs = True
+warn_unreachable = True
+disallow_any_generics = True
exclude = tests/unit/gapic/
[mypy-grpc.*]
@@ -26,3 +29,10 @@ ignore_missing_imports = True
[mypy-pytest]
ignore_missing_imports = True
+
+[mypy-google.cloud.*]
+ignore_errors = True
+
+# only verify data client
+[mypy-google.cloud.bigtable.data.*]
+ignore_errors = False
diff --git a/noxfile.py b/noxfile.py
index 548bfd0ec..8df24410c 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -14,6 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# DO NOT EDIT THIS FILE OUTSIDE OF `.librarian/generator-input`
+# The source of truth for this file is `.librarian/generator-input`
+
+
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
@@ -30,9 +34,9 @@
FLAKE8_VERSION = "flake8==6.1.0"
BLACK_VERSION = "black[jupyter]==23.3.0"
ISORT_VERSION = "isort==5.11.0"
-LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
+LINT_PATHS = ["google", "tests", "noxfile.py", "setup.py"]
-DEFAULT_PYTHON_VERSION = "3.8"
+DEFAULT_PYTHON_VERSION = "3.13"
UNIT_TEST_PYTHON_VERSIONS: List[str] = [
"3.7",
@@ -42,6 +46,7 @@
"3.11",
"3.12",
"3.13",
+ "3.14",
]
UNIT_TEST_STANDARD_DEPENDENCIES = [
"mock",
@@ -58,7 +63,7 @@
UNIT_TEST_EXTRAS: List[str] = []
UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {}
-SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.8", "3.12"]
+SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.10", "3.14"]
SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [
"mock",
"pytest",
@@ -78,7 +83,11 @@
# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
- "unit",
+ "unit-3.10",
+ "unit-3.11",
+ "unit-3.12",
+ "unit-3.13",
+ "unit-3.14",
"system_emulated",
"system",
"mypy",
@@ -148,26 +157,13 @@ def mypy(session):
"mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests"
)
session.install("google-cloud-testutils")
- session.run(
- "mypy",
- "-p",
- "google.cloud.bigtable.data",
- "--check-untyped-defs",
- "--warn-unreachable",
- "--disallow-any-generics",
- "--exclude",
- "tests/system/v2_client",
- "--exclude",
- "tests/unit/v2_client",
- "--disable-error-code",
- "func-returns-value", # needed for CrossSync.rm_aio
- )
+ session.run("mypy", "-p", "google.cloud.bigtable.data")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
- session.install("docutils", "pygments")
+ session.install("setuptools", "docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
@@ -206,8 +202,8 @@ def install_unittest_dependencies(session, *constraints):
)
def unit(session, protobuf_implementation):
# Install all test dependencies, then install this package in-place.
-
- if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12", "3.13"):
+ py_version = tuple([int(v) for v in session.python.split(".")])
+ if protobuf_implementation == "cpp" and py_version >= (3, 11):
session.skip("cpp implementation is not supported in python 3.11+")
constraints_path = str(
@@ -270,7 +266,7 @@ def install_systemtest_dependencies(session, *constraints):
session.install("-e", ".", *constraints)
-@nox.session(python="3.8")
+@nox.session(python=DEFAULT_PYTHON_VERSION)
def system_emulated(session):
import subprocess
import signal
@@ -456,7 +452,7 @@ def docfx(session):
session.run("python", "docs/scripts/patch_devsite_toc.py")
-@nox.session(python="3.12")
+@nox.session(python="3.14")
@nox.parametrize(
"protobuf_implementation",
["python", "upb", "cpp"],
@@ -464,7 +460,8 @@ def docfx(session):
def prerelease_deps(session, protobuf_implementation):
"""Run all tests with prerelease versions of dependencies installed."""
- if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12", "3.13"):
+ py_version = tuple([int(v) for v in session.python.split(".")])
+ if protobuf_implementation == "cpp" and py_version >= (3, 11):
session.skip("cpp implementation is not supported in python 3.11+")
# Install all dependencies
@@ -519,6 +516,7 @@ def prerelease_deps(session, protobuf_implementation):
# Remaining dependencies
other_deps = [
"requests",
+ "cryptography",
]
session.install(*other_deps)
diff --git a/owlbot.py b/owlbot.py
deleted file mode 100644
index 56573f71e..000000000
--- a/owlbot.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This script is used to synthesize generated parts of this library."""
-
-from pathlib import Path
-import re
-from typing import List, Optional
-
-import synthtool as s
-from synthtool import gcp
-from synthtool.languages import python
-
-common = gcp.CommonTemplates()
-
-# This is a customized version of the s.get_staging_dirs() function from synthtool to
-# cater for copying 2 different folders from googleapis-gen
-# which are bigtable and bigtable/admin.
-# Source https://github.com/googleapis/synthtool/blob/master/synthtool/transforms.py#L280
-def get_staging_dirs(
- default_version: Optional[str] = None, sub_directory: Optional[str] = None
-) -> List[Path]:
- """Returns the list of directories, one per version, copied from
- https://github.com/googleapis/googleapis-gen. Will return in lexical sorting
- order with the exception of the default_version which will be last (if specified).
-
- Args:
- default_version (str): the default version of the API. The directory for this version
- will be the last item in the returned list if specified.
- sub_directory (str): if a `sub_directory` is provided, only the directories within the
- specified `sub_directory` will be returned.
-
- Returns: the empty list if no file were copied.
- """
-
- staging = Path("owl-bot-staging")
-
- if sub_directory:
- staging /= sub_directory
-
- if staging.is_dir():
- # Collect the subdirectories of the staging directory.
- versions = [v.name for v in staging.iterdir() if v.is_dir()]
- # Reorder the versions so the default version always comes last.
- versions = [v for v in versions if v != default_version]
- versions.sort()
- if default_version is not None:
- versions += [default_version]
- dirs = [staging / v for v in versions]
- for dir in dirs:
- s._tracked_paths.add(dir)
- return dirs
- else:
- return []
-
-# This library ships clients for two different APIs,
-# BigTable and BigTable Admin
-bigtable_default_version = "v2"
-bigtable_admin_default_version = "v2"
-
-for library in get_staging_dirs(bigtable_default_version, "bigtable"):
- s.move(library / "google/cloud/bigtable_v2", excludes=["**/gapic_version.py"])
- s.move(library / "tests")
- s.move(library / "scripts")
-
-for library in get_staging_dirs(bigtable_admin_default_version, "bigtable_admin"):
- s.move(library / "google/cloud/bigtable_admin", excludes=["**/gapic_version.py"])
- s.move(library / "google/cloud/bigtable_admin_v2", excludes=["**/gapic_version.py"])
- s.move(library / "tests")
- s.move(library / "scripts")
-
-s.remove_staging_dirs()
-
-# ----------------------------------------------------------------------------
-# Add templated files
-# ----------------------------------------------------------------------------
-templated_files = common.py_library(
- samples=True, # set to True only if there are samples
- split_system_tests=True,
- microgenerator=True,
- cov_level=99,
- system_test_external_dependencies=[
- "pytest-asyncio==0.21.2",
- ],
-)
-
-s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml", "noxfile.py", "renovate.json"])
-
-
-# ----------------------------------------------------------------------------
-# Always supply app_profile_id in routing headers: https://github.com/googleapis/python-bigtable/pull/1109
-# TODO: remove after backend no longer requires empty strings
-# ----------------------------------------------------------------------------
-for file in ["async_client.py", "client.py"]:
- s.replace(
- f"google/cloud/bigtable_v2/services/bigtable/{file}",
- "if request.app_profile_id:",
- "if True: # always attach app_profile_id, even if empty string"
- )
-# fix tests
-s.replace(
- "tests/unit/gapic/bigtable_v2/test_bigtable.py",
- 'expected_headers = {"name": "projects/sample1/instances/sample2"}',
- 'expected_headers = {"name": "projects/sample1/instances/sample2", "app_profile_id": ""}'
-)
-s.replace(
- "tests/unit/gapic/bigtable_v2/test_bigtable.py",
- """
- expected_headers = {
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
- }
- """,
- """
- expected_headers = {
- "app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
- }
- """
-)
-s.replace(
- "tests/unit/gapic/bigtable_v2/test_bigtable.py",
- """
- expected_headers = {
- "table_name": "projects/sample1/instances/sample2/tables/sample3"
- }
- """,
- """
- expected_headers = {
- "table_name": "projects/sample1/instances/sample2/tables/sample3",
- "app_profile_id": ""
- }
- """
-)
-
-# ----------------------------------------------------------------------------
-# Samples templates
-# ----------------------------------------------------------------------------
-
-python.py_samples(skip_readmes=True)
-
-s.replace(
- "samples/beam/noxfile.py",
- """INSTALL_LIBRARY_FROM_SOURCE \= os.environ.get\("INSTALL_LIBRARY_FROM_SOURCE", False\) in \(
- "True",
- "true",
-\)""",
- """# todo(kolea2): temporary workaround to install pinned dep version
-INSTALL_LIBRARY_FROM_SOURCE = False""")
-
-s.shell.run(["nox", "-s", "blacken"], hide_output=False)
diff --git a/release-please-config.json b/release-please-config.json
deleted file mode 100644
index 33d5a7e21..000000000
--- a/release-please-config.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "$schema":
-"https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
- "packages": {
- ".": {
- "release-type": "python",
- "extra-files": [
- "google/cloud/bigtable/gapic_version.py",
- "google/cloud/bigtable_admin/gapic_version.py",
- "google/cloud/bigtable_v2/gapic_version.py",
- "google/cloud/bigtable_admin_v2/gapic_version.py"
- ]
- }
- },
- "release-type": "python",
- "plugins": [
- {
- "type": "sentence-case"
- }
- ],
- "initial-version": "2.13.2"
-}
diff --git a/releases.md b/releases.md
deleted file mode 120000
index 4c43d4932..000000000
--- a/releases.md
+++ /dev/null
@@ -1 +0,0 @@
-../../bigtable/CHANGELOG.md
\ No newline at end of file
diff --git a/samples/beam/requirements.txt b/samples/beam/requirements.txt
index 55b3ae719..e709a03cb 100644
--- a/samples/beam/requirements.txt
+++ b/samples/beam/requirements.txt
@@ -1,3 +1,5 @@
-apache-beam==2.65.0
-google-cloud-bigtable==2.30.1
-google-cloud-core==2.4.3
+apache-beam===2.60.0; python_version == '3.8'
+apache-beam===2.69.0; python_version == '3.9'
+apache-beam==2.71.0; python_version >= '3.10'
+google-cloud-bigtable==2.35.0
+google-cloud-core==2.5.0
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py
new file mode 100644
index 000000000..82dafab44
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateAppProfile
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ app_profile = bigtable_admin_v2.AppProfile()
+ app_profile.priority = "PRIORITY_HIGH"
+
+ request = bigtable_admin_v2.CreateAppProfileRequest(
+ parent="parent_value",
+ app_profile_id="app_profile_id_value",
+ app_profile=app_profile,
+ )
+
+ # Make the request
+ response = await client.create_app_profile(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py
new file mode 100644
index 000000000..82ff382b7
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateAppProfile
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_create_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ app_profile = bigtable_admin_v2.AppProfile()
+ app_profile.priority = "PRIORITY_HIGH"
+
+ request = bigtable_admin_v2.CreateAppProfileRequest(
+ parent="parent_value",
+ app_profile_id="app_profile_id_value",
+ app_profile=app_profile,
+ )
+
+ # Make the request
+ response = client.create_app_profile(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py
new file mode 100644
index 000000000..fb9fac60f
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateClusterRequest(
+ parent="parent_value",
+ cluster_id="cluster_id_value",
+ )
+
+ # Make the request
+ operation = client.create_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py
new file mode 100644
index 000000000..d8d5f9958
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_create_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateClusterRequest(
+ parent="parent_value",
+ cluster_id="cluster_id_value",
+ )
+
+ # Make the request
+ operation = client.create_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py
new file mode 100644
index 000000000..dbde6c4bc
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ instance = bigtable_admin_v2.Instance()
+ instance.display_name = "display_name_value"
+
+ request = bigtable_admin_v2.CreateInstanceRequest(
+ parent="parent_value",
+ instance_id="instance_id_value",
+ instance=instance,
+ )
+
+ # Make the request
+ operation = client.create_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py
new file mode 100644
index 000000000..83ec90e53
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_create_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ instance = bigtable_admin_v2.Instance()
+ instance.display_name = "display_name_value"
+
+ request = bigtable_admin_v2.CreateInstanceRequest(
+ parent="parent_value",
+ instance_id="instance_id_value",
+ instance=instance,
+ )
+
+ # Make the request
+ operation = client.create_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py
new file mode 100644
index 000000000..6dfb1d612
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ logical_view = bigtable_admin_v2.LogicalView()
+ logical_view.query = "query_value"
+
+ request = bigtable_admin_v2.CreateLogicalViewRequest(
+ parent="parent_value",
+ logical_view_id="logical_view_id_value",
+ logical_view=logical_view,
+ )
+
+ # Make the request
+ operation = client.create_logical_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py
new file mode 100644
index 000000000..f0214acbf
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_create_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ logical_view = bigtable_admin_v2.LogicalView()
+ logical_view.query = "query_value"
+
+ request = bigtable_admin_v2.CreateLogicalViewRequest(
+ parent="parent_value",
+ logical_view_id="logical_view_id_value",
+ logical_view=logical_view,
+ )
+
+ # Make the request
+ operation = client.create_logical_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py
new file mode 100644
index 000000000..30481d2f3
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ materialized_view = bigtable_admin_v2.MaterializedView()
+ materialized_view.query = "query_value"
+
+ request = bigtable_admin_v2.CreateMaterializedViewRequest(
+ parent="parent_value",
+ materialized_view_id="materialized_view_id_value",
+ materialized_view=materialized_view,
+ )
+
+ # Make the request
+ operation = client.create_materialized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py
new file mode 100644
index 000000000..45116fb49
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_create_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ materialized_view = bigtable_admin_v2.MaterializedView()
+ materialized_view.query = "query_value"
+
+ request = bigtable_admin_v2.CreateMaterializedViewRequest(
+ parent="parent_value",
+ materialized_view_id="materialized_view_id_value",
+ materialized_view=materialized_view,
+ )
+
+ # Make the request
+ operation = client.create_materialized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py
new file mode 100644
index 000000000..76d272519
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteAppProfile
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteAppProfileRequest(
+ name="name_value",
+ ignore_warnings=True,
+ )
+
+ # Make the request
+ await client.delete_app_profile(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py
new file mode 100644
index 000000000..47f552fb8
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteAppProfile
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteAppProfileRequest(
+ name="name_value",
+ ignore_warnings=True,
+ )
+
+ # Make the request
+ client.delete_app_profile(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py
new file mode 100644
index 000000000..6f97b6a5e
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteClusterRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_cluster(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py
new file mode 100644
index 000000000..d058a08e6
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteClusterRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_cluster(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py
new file mode 100644
index 000000000..ecf5583be
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteInstanceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_instance(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py
new file mode 100644
index 000000000..e8f568486
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteInstanceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_instance(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py
new file mode 100644
index 000000000..93f9d8ce8
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteLogicalViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_logical_view(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py
new file mode 100644
index 000000000..fdece2bbc
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteLogicalViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_logical_view(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py
new file mode 100644
index 000000000..22a9f0ad4
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteMaterializedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_materialized_view(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py
new file mode 100644
index 000000000..b6cf3a453
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteMaterializedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_materialized_view(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py
new file mode 100644
index 000000000..3a59ca599
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetAppProfile
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetAppProfileRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_app_profile(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py
new file mode 100644
index 000000000..2e54bfcad
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetAppProfile
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetAppProfileRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_app_profile(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py
new file mode 100644
index 000000000..b4d89a11d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetClusterRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_cluster(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py
new file mode 100644
index 000000000..25a80a871
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetClusterRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_cluster(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py
new file mode 100644
index 000000000..b2e479c11
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+async def sample_get_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py
new file mode 100644
index 000000000..ffb2a81b0
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+def sample_get_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py
new file mode 100644
index 000000000..b76fac83a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetInstanceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_instance(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py
new file mode 100644
index 000000000..711ed99a5
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetInstanceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_instance(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py
new file mode 100644
index 000000000..4ce25cdda
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetLogicalViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_logical_view(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py
new file mode 100644
index 000000000..daaf7fa63
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetLogicalViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_logical_view(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py
new file mode 100644
index 000000000..165fb262c
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetMaterializedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_materialized_view(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py
new file mode 100644
index 000000000..1f94e3954
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetMaterializedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_materialized_view(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py
new file mode 100644
index 000000000..d377fc678
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListAppProfiles
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_app_profiles():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListAppProfilesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+    page_result = await client.list_app_profiles(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py
new file mode 100644
index 000000000..07f49ba39
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListAppProfiles
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_app_profiles():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListAppProfilesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_app_profiles(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py
new file mode 100644
index 000000000..71532d98a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListClusters
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_clusters():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListClustersRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = await client.list_clusters(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py
new file mode 100644
index 000000000..1c36c098d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListClusters
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_clusters():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListClustersRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.list_clusters(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py
new file mode 100644
index 000000000..cb6d58847
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListHotTablets
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_hot_tablets():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListHotTabletsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+    page_result = await client.list_hot_tablets(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py
new file mode 100644
index 000000000..5add7715d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListHotTablets
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_hot_tablets():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListHotTabletsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_hot_tablets(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py
new file mode 100644
index 000000000..91c9a8230
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListInstances
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_instances():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListInstancesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = await client.list_instances(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py
new file mode 100644
index 000000000..bbe708c0e
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListInstances
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_instances():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListInstancesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.list_instances(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py
new file mode 100644
index 000000000..8de9bd06e
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListLogicalViews
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_logical_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListLogicalViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+    page_result = await client.list_logical_views(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py
new file mode 100644
index 000000000..b5fb602cd
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListLogicalViews
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_logical_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListLogicalViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_logical_views(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py
new file mode 100644
index 000000000..6fa672a25
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListMaterializedViews
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_materialized_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListMaterializedViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+    page_result = await client.list_materialized_views(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py
new file mode 100644
index 000000000..5a25da88a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListMaterializedViews
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_materialized_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListMaterializedViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_materialized_views(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py
new file mode 100644
index 000000000..dab73b9cb
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for PartialUpdateCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_partial_update_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.PartialUpdateClusterRequest(
+ )
+
+ # Make the request
+    operation = await client.partial_update_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py
new file mode 100644
index 000000000..bab63c6ed
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for PartialUpdateCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_partial_update_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.PartialUpdateClusterRequest(
+ )
+
+ # Make the request
+ operation = client.partial_update_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py
new file mode 100644
index 000000000..4c5e53ebe
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for PartialUpdateInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_partial_update_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ instance = bigtable_admin_v2.Instance()
+ instance.display_name = "display_name_value"
+
+ request = bigtable_admin_v2.PartialUpdateInstanceRequest(
+ instance=instance,
+ )
+
+ # Make the request
+    operation = await client.partial_update_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py
new file mode 100644
index 000000000..0d2a74cfc
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for PartialUpdateInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_partial_update_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ instance = bigtable_admin_v2.Instance()
+ instance.display_name = "display_name_value"
+
+ request = bigtable_admin_v2.PartialUpdateInstanceRequest(
+ instance=instance,
+ )
+
+ # Make the request
+ operation = client.partial_update_instance(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py
new file mode 100644
index 000000000..b389b7679
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for SetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+async def sample_set_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py
new file mode 100644
index 000000000..97bc29d65
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for SetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+def sample_set_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py
new file mode 100644
index 000000000..977f79d9b
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for TestIamPermissions
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+async def sample_test_iam_permissions():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = await client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py
new file mode 100644
index 000000000..db047d367
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for TestIamPermissions
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+def sample_test_iam_permissions():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py
new file mode 100644
index 000000000..2c55a45bd
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateAppProfile
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ app_profile = bigtable_admin_v2.AppProfile()
+ app_profile.priority = "PRIORITY_HIGH"
+
+ request = bigtable_admin_v2.UpdateAppProfileRequest(
+ app_profile=app_profile,
+ )
+
+ # Make the request
+ operation = client.update_app_profile(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py
new file mode 100644
index 000000000..a7b683426
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateAppProfile
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_update_app_profile():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ app_profile = bigtable_admin_v2.AppProfile()
+ app_profile.priority = "PRIORITY_HIGH"
+
+ request = bigtable_admin_v2.UpdateAppProfileRequest(
+ app_profile=app_profile,
+ )
+
+ # Make the request
+ operation = client.update_app_profile(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py
new file mode 100644
index 000000000..af3abde41
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.Cluster(
+ )
+
+ # Make the request
+ operation = client.update_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py
new file mode 100644
index 000000000..ec02a64af
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_update_cluster():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.Cluster(
+ )
+
+ # Make the request
+ operation = client.update_cluster(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py
new file mode 100644
index 000000000..798afaf80
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.Instance(
+ display_name="display_name_value",
+ )
+
+ # Make the request
+ response = await client.update_instance(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py
new file mode 100644
index 000000000..fb6e5e2d3
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_update_instance():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.Instance(
+ display_name="display_name_value",
+ )
+
+ # Make the request
+ response = client.update_instance(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py
new file mode 100644
index 000000000..9bdd620e6
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ logical_view = bigtable_admin_v2.LogicalView()
+ logical_view.query = "query_value"
+
+ request = bigtable_admin_v2.UpdateLogicalViewRequest(
+ logical_view=logical_view,
+ )
+
+ # Make the request
+ operation = client.update_logical_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py
new file mode 100644
index 000000000..10d962205
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_update_logical_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ logical_view = bigtable_admin_v2.LogicalView()
+ logical_view.query = "query_value"
+
+ request = bigtable_admin_v2.UpdateLogicalViewRequest(
+ logical_view=logical_view,
+ )
+
+ # Make the request
+ operation = client.update_logical_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py
new file mode 100644
index 000000000..ddd930475
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+ # Initialize request argument(s)
+ materialized_view = bigtable_admin_v2.MaterializedView()
+ materialized_view.query = "query_value"
+
+ request = bigtable_admin_v2.UpdateMaterializedViewRequest(
+ materialized_view=materialized_view,
+ )
+
+ # Make the request
+ operation = client.update_materialized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py
new file mode 100644
index 000000000..a2ef78bd3
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_update_materialized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+ # Initialize request argument(s)
+ materialized_view = bigtable_admin_v2.MaterializedView()
+ materialized_view.query = "query_value"
+
+ request = bigtable_admin_v2.UpdateMaterializedViewRequest(
+ materialized_view=materialized_view,
+ )
+
+ # Make the request
+ operation = client.update_materialized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py
new file mode 100644
index 000000000..4cd57edc8
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CheckConsistency
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_check_consistency():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CheckConsistencyRequest(
+ name="name_value",
+ consistency_token="consistency_token_value",
+ )
+
+ # Make the request
+ response = await client.check_consistency(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py
new file mode 100644
index 000000000..ec6085b3f
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CheckConsistency
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_check_consistency():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CheckConsistencyRequest(
+ name="name_value",
+ consistency_token="consistency_token_value",
+ )
+
+ # Make the request
+ response = client.check_consistency(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py
new file mode 100644
index 000000000..9355b7d44
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CopyBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_copy_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CopyBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ )
+
+ # Make the request
+ operation = client.copy_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py
new file mode 100644
index 000000000..25456ad21
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CopyBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_copy_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CopyBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ )
+
+ # Make the request
+ operation = client.copy_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py
new file mode 100644
index 000000000..135bbe220
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateAuthorizedViewRequest(
+ parent="parent_value",
+ authorized_view_id="authorized_view_id_value",
+ )
+
+ # Make the request
+ operation = client.create_authorized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py
new file mode 100644
index 000000000..cafbf56cb
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_create_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateAuthorizedViewRequest(
+ parent="parent_value",
+ authorized_view_id="authorized_view_id_value",
+ )
+
+ # Make the request
+ operation = client.create_authorized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py
new file mode 100644
index 000000000..d9bd402b4
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ backup = bigtable_admin_v2.Backup()
+ backup.source_table = "source_table_value"
+
+ request = bigtable_admin_v2.CreateBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ backup=backup,
+ )
+
+ # Make the request
+ operation = client.create_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py
new file mode 100644
index 000000000..835f0573c
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_create_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ backup = bigtable_admin_v2.Backup()
+ backup.source_table = "source_table_value"
+
+ request = bigtable_admin_v2.CreateBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ backup=backup,
+ )
+
+ # Make the request
+ operation = client.create_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py
new file mode 100644
index 000000000..8e4992635
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ schema_bundle = bigtable_admin_v2.SchemaBundle()
+ schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
+
+ request = bigtable_admin_v2.CreateSchemaBundleRequest(
+ parent="parent_value",
+ schema_bundle_id="schema_bundle_id_value",
+ schema_bundle=schema_bundle,
+ )
+
+ # Make the request
+ operation = client.create_schema_bundle(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py
new file mode 100644
index 000000000..a5911497d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_create_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ schema_bundle = bigtable_admin_v2.SchemaBundle()
+ schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
+
+ request = bigtable_admin_v2.CreateSchemaBundleRequest(
+ parent="parent_value",
+ schema_bundle_id="schema_bundle_id_value",
+ schema_bundle=schema_bundle,
+ )
+
+ # Make the request
+ operation = client.create_schema_bundle(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py
new file mode 100644
index 000000000..3096539b9
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateTableRequest(
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ response = await client.create_table(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py
new file mode 100644
index 000000000..f7767438e
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTableFromSnapshot
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_table_from_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateTableFromSnapshotRequest(
+ parent="parent_value",
+ table_id="table_id_value",
+ source_snapshot="source_snapshot_value",
+ )
+
+ # Make the request
+ operation = client.create_table_from_snapshot(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py
new file mode 100644
index 000000000..ff1dd7899
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTableFromSnapshot
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_create_table_from_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateTableFromSnapshotRequest(
+ parent="parent_value",
+ table_id="table_id_value",
+ source_snapshot="source_snapshot_value",
+ )
+
+ # Make the request
+ operation = client.create_table_from_snapshot(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py
new file mode 100644
index 000000000..552a1095f
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_create_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CreateTableRequest(
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ response = client.create_table(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py
new file mode 100644
index 000000000..cbee06ae1
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteAuthorizedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_authorized_view(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py
new file mode 100644
index 000000000..298e66efb
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteAuthorizedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_authorized_view(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py
new file mode 100644
index 000000000..d2615f792
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_backup(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py
new file mode 100644
index 000000000..c9888bf39
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_backup(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py
new file mode 100644
index 000000000..7377299d1
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteSchemaBundleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_schema_bundle(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py
new file mode 100644
index 000000000..5dc12b464
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteSchemaBundleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_schema_bundle(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py
new file mode 100644
index 000000000..eb8ca8166
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteSnapshot
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteSnapshotRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_snapshot(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py
new file mode 100644
index 000000000..ad979615d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteSnapshot
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteSnapshotRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_snapshot(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py
new file mode 100644
index 000000000..375e61557
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.delete_table(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py
new file mode 100644
index 000000000..17397bfab
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DeleteTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_table(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py
new file mode 100644
index 000000000..391205c7c
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DropRowRange
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_drop_row_range():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DropRowRangeRequest(
+ row_key_prefix=b'row_key_prefix_blob',
+ name="name_value",
+ )
+
+ # Make the request
+ await client.drop_row_range(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py
new file mode 100644
index 000000000..bcd528f1a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DropRowRange
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_drop_row_range():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.DropRowRangeRequest(
+ row_key_prefix=b'row_key_prefix_blob',
+ name="name_value",
+ )
+
+ # Make the request
+ client.drop_row_range(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py
new file mode 100644
index 000000000..1953441b6
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GenerateConsistencyToken
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_generate_consistency_token():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GenerateConsistencyTokenRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.generate_consistency_token(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py
new file mode 100644
index 000000000..4ae52264d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GenerateConsistencyToken
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_generate_consistency_token():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GenerateConsistencyTokenRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.generate_consistency_token(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py
new file mode 100644
index 000000000..129948bc5
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetAuthorizedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_authorized_view(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py
new file mode 100644
index 000000000..9cc63538c
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetAuthorizedViewRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_authorized_view(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py
new file mode 100644
index 000000000..524d63e86
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_backup(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py
new file mode 100644
index 000000000..5ed91b80c
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_backup(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py
new file mode 100644
index 000000000..a599239d5
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+async def sample_get_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py
new file mode 100644
index 000000000..2d6e71c27
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+def sample_get_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.GetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.get_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py
new file mode 100644
index 000000000..b5e580276
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetSchemaBundleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_schema_bundle(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py
new file mode 100644
index 000000000..1ea7b69b7
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetSchemaBundleRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_schema_bundle(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py
new file mode 100644
index 000000000..ae48060bb
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetSnapshot
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetSnapshotRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_snapshot(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py
new file mode 100644
index 000000000..8626549fd
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetSnapshot
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_snapshot():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetSnapshotRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_snapshot(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py
new file mode 100644
index 000000000..ff8dff1ae
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_table(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py
new file mode 100644
index 000000000..ccb68b766
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_table(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py
new file mode 100644
index 000000000..658b8f96a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListAuthorizedViews
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_authorized_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListAuthorizedViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_authorized_views(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py
new file mode 100644
index 000000000..a7bf4b6ad
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListAuthorizedViews
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_authorized_views():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListAuthorizedViewsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_authorized_views(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py
new file mode 100644
index 000000000..368c376f0
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListBackups
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_backups():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListBackupsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backups(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py
new file mode 100644
index 000000000..ca0e3e0f2
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListBackups
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_backups():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListBackupsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_backups(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py
new file mode 100644
index 000000000..3daf30e6d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListSchemaBundles
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_schema_bundles():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListSchemaBundlesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_schema_bundles(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py
new file mode 100644
index 000000000..945d606bb
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListSchemaBundles
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_schema_bundles():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListSchemaBundlesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_schema_bundles(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py
new file mode 100644
index 000000000..91acb1d9e
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListSnapshots
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_snapshots():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListSnapshotsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_snapshots(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py
new file mode 100644
index 000000000..7f809156f
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListSnapshots
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_snapshots():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListSnapshotsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_snapshots(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py
new file mode 100644
index 000000000..191de0fc7
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTables
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_tables():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListTablesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tables(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py
new file mode 100644
index 000000000..5d0f3a278
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTables
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_tables():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ListTablesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tables(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py
new file mode 100644
index 000000000..2c206eb44
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ModifyColumnFamilies
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_modify_column_families():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ModifyColumnFamiliesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.modify_column_families(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py
new file mode 100644
index 000000000..6224f5c5e
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ModifyColumnFamilies
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_modify_column_families():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.ModifyColumnFamiliesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.modify_column_families(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py
new file mode 100644
index 000000000..f70b5da17
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for RestoreTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_restore_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.RestoreTableRequest(
+ backup="backup_value",
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ operation = client._restore_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py
new file mode 100644
index 000000000..45621c22b
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for RestoreTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_restore_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.RestoreTableRequest(
+ backup="backup_value",
+ parent="parent_value",
+ table_id="table_id_value",
+ )
+
+ # Make the request
+ operation = client._restore_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py
new file mode 100644
index 000000000..cbfafdc77
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for SetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+async def sample_set_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = await client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py
new file mode 100644
index 000000000..9a6c5fcc2
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for SetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+def sample_set_iam_policy():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.SetIamPolicyRequest(
+ resource="resource_value",
+ )
+
+ # Make the request
+ response = client.set_iam_policy(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py
new file mode 100644
index 000000000..6ff619e85
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for SnapshotTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_snapshot_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.SnapshotTableRequest(
+ name="name_value",
+ cluster="cluster_value",
+ snapshot_id="snapshot_id_value",
+ )
+
+ # Make the request
+ operation = client.snapshot_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py
new file mode 100644
index 000000000..f983f7824
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for SnapshotTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_snapshot_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.SnapshotTableRequest(
+ name="name_value",
+ cluster="cluster_value",
+ snapshot_id="snapshot_id_value",
+ )
+
+ # Make the request
+ operation = client.snapshot_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py
new file mode 100644
index 000000000..ee5fe6027
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for TestIamPermissions
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+async def sample_test_iam_permissions():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = await client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py
new file mode 100644
index 000000000..46f0870b0
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for TestIamPermissions
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2 # type: ignore
+
+
+def sample_test_iam_permissions():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = iam_policy_pb2.TestIamPermissionsRequest(
+ resource="resource_value",
+ permissions=['permissions_value1', 'permissions_value2'],
+ )
+
+ # Make the request
+ response = client.test_iam_permissions(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py
new file mode 100644
index 000000000..1e2f6aa5a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UndeleteTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_undelete_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UndeleteTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.undelete_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py
new file mode 100644
index 000000000..637afee8b
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UndeleteTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_undelete_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UndeleteTableRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.undelete_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py
new file mode 100644
index 000000000..541427d48
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UpdateAuthorizedViewRequest(
+ )
+
+ # Make the request
+ operation = client.update_authorized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py
new file mode 100644
index 000000000..9c8198d9a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_update_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UpdateAuthorizedViewRequest(
+ )
+
+ # Make the request
+ operation = client.update_authorized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py
new file mode 100644
index 000000000..f98e1e33a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ backup = bigtable_admin_v2.Backup()
+ backup.source_table = "source_table_value"
+
+ request = bigtable_admin_v2.UpdateBackupRequest(
+ backup=backup,
+ )
+
+ # Make the request
+ response = await client.update_backup(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py
new file mode 100644
index 000000000..466a3decb
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_update_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ backup = bigtable_admin_v2.Backup()
+ backup.source_table = "source_table_value"
+
+ request = bigtable_admin_v2.UpdateBackupRequest(
+ backup=backup,
+ )
+
+ # Make the request
+ response = client.update_backup(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py
new file mode 100644
index 000000000..96447088e
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ schema_bundle = bigtable_admin_v2.SchemaBundle()
+ schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
+
+ request = bigtable_admin_v2.UpdateSchemaBundleRequest(
+ schema_bundle=schema_bundle,
+ )
+
+ # Make the request
+ operation = client.update_schema_bundle(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py
new file mode 100644
index 000000000..075683060
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_update_schema_bundle():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ schema_bundle = bigtable_admin_v2.SchemaBundle()
+ schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
+
+ request = bigtable_admin_v2.UpdateSchemaBundleRequest(
+ schema_bundle=schema_bundle,
+ )
+
+ # Make the request
+ operation = client.update_schema_bundle(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py
new file mode 100644
index 000000000..93839d36f
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UpdateTableRequest(
+ )
+
+ # Make the request
+ operation = client.update_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py
new file mode 100644
index 000000000..fea09f6a8
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_update_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UpdateTableRequest(
+ )
+
+ # Make the request
+ operation = client.update_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync]
diff --git a/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json b/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json
new file mode 100644
index 000000000..42db3b70b
--- /dev/null
+++ b/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json
@@ -0,0 +1,10871 @@
+{
+ "clientLibrary": {
+ "apis": [
+ {
+ "id": "google.bigtable.admin.v2",
+ "version": "v2"
+ }
+ ],
+ "language": "PYTHON",
+ "name": "google-cloud-bigtable-admin",
+ "version": "2.35.0"
+ },
+ "snippets": [
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_app_profile",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "CreateAppProfile"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "app_profile_id",
+ "type": "str"
+ },
+ {
+ "name": "app_profile",
+ "type": "google.cloud.bigtable_admin_v2.types.AppProfile"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile",
+ "shortName": "create_app_profile"
+ },
+ "description": "Sample for CreateAppProfile",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_app_profile",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "CreateAppProfile"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "app_profile_id",
+ "type": "str"
+ },
+ {
+ "name": "app_profile",
+ "type": "google.cloud.bigtable_admin_v2.types.AppProfile"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile",
+ "shortName": "create_app_profile"
+ },
+ "description": "Sample for CreateAppProfile",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_cluster",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "CreateCluster"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateClusterRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "cluster_id",
+ "type": "str"
+ },
+ {
+ "name": "cluster",
+ "type": "google.cloud.bigtable_admin_v2.types.Cluster"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "create_cluster"
+ },
+ "description": "Sample for CreateCluster",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_cluster",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "CreateCluster"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateClusterRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "cluster_id",
+ "type": "str"
+ },
+ {
+ "name": "cluster",
+ "type": "google.cloud.bigtable_admin_v2.types.Cluster"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "create_cluster"
+ },
+ "description": "Sample for CreateCluster",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_instance",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "CreateInstance"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateInstanceRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "instance_id",
+ "type": "str"
+ },
+ {
+ "name": "instance",
+ "type": "google.cloud.bigtable_admin_v2.types.Instance"
+ },
+ {
+ "name": "clusters",
+ "type": "MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "create_instance"
+ },
+ "description": "Sample for CreateInstance",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async",
+ "segments": [
+ {
+ "end": 60,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 60,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 57,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 61,
+ "start": 58,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_instance",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "CreateInstance"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateInstanceRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "instance_id",
+ "type": "str"
+ },
+ {
+ "name": "instance",
+ "type": "google.cloud.bigtable_admin_v2.types.Instance"
+ },
+ {
+ "name": "clusters",
+ "type": "MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "create_instance"
+ },
+ "description": "Sample for CreateInstance",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync",
+ "segments": [
+ {
+ "end": 60,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 60,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 57,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 61,
+ "start": 58,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_logical_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateLogicalView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "CreateLogicalView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "logical_view",
+ "type": "google.cloud.bigtable_admin_v2.types.LogicalView"
+ },
+ {
+ "name": "logical_view_id",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "create_logical_view"
+ },
+ "description": "Sample for CreateLogicalView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async",
+ "segments": [
+ {
+ "end": 60,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 60,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 57,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 61,
+ "start": 58,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_logical_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateLogicalView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "CreateLogicalView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "logical_view",
+ "type": "google.cloud.bigtable_admin_v2.types.LogicalView"
+ },
+ {
+ "name": "logical_view_id",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "create_logical_view"
+ },
+ "description": "Sample for CreateLogicalView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync",
+ "segments": [
+ {
+ "end": 60,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 60,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 57,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 61,
+ "start": 58,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_materialized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateMaterializedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "CreateMaterializedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "materialized_view",
+ "type": "google.cloud.bigtable_admin_v2.types.MaterializedView"
+ },
+ {
+ "name": "materialized_view_id",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "create_materialized_view"
+ },
+ "description": "Sample for CreateMaterializedView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async",
+ "segments": [
+ {
+ "end": 60,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 60,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 57,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 61,
+ "start": 58,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_materialized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateMaterializedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "CreateMaterializedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "materialized_view",
+ "type": "google.cloud.bigtable_admin_v2.types.MaterializedView"
+ },
+ {
+ "name": "materialized_view_id",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "create_materialized_view"
+ },
+ "description": "Sample for CreateMaterializedView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync",
+ "segments": [
+ {
+ "end": 60,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 60,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 57,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 61,
+ "start": 58,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_app_profile",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "DeleteAppProfile"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "ignore_warnings",
+ "type": "bool"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_app_profile"
+ },
+ "description": "Sample for DeleteAppProfile",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_app_profile",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "DeleteAppProfile"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "ignore_warnings",
+ "type": "bool"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_app_profile"
+ },
+ "description": "Sample for DeleteAppProfile",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_cluster",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "DeleteCluster"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteClusterRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_cluster"
+ },
+ "description": "Sample for DeleteCluster",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_cluster",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "DeleteCluster"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteClusterRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_cluster"
+ },
+ "description": "Sample for DeleteCluster",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_instance",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "DeleteInstance"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_instance"
+ },
+ "description": "Sample for DeleteInstance",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_instance",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "DeleteInstance"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_instance"
+ },
+ "description": "Sample for DeleteInstance",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_logical_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteLogicalView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "DeleteLogicalView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_logical_view"
+ },
+ "description": "Sample for DeleteLogicalView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_logical_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteLogicalView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "DeleteLogicalView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_logical_view"
+ },
+ "description": "Sample for DeleteLogicalView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_materialized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteMaterializedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "DeleteMaterializedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_materialized_view"
+ },
+ "description": "Sample for DeleteMaterializedView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_materialized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteMaterializedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "DeleteMaterializedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_materialized_view"
+ },
+ "description": "Sample for DeleteMaterializedView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_app_profile",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetAppProfile"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetAppProfileRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile",
+ "shortName": "get_app_profile"
+ },
+ "description": "Sample for GetAppProfile",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_app_profile",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetAppProfile"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetAppProfileRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile",
+ "shortName": "get_app_profile"
+ },
+ "description": "Sample for GetAppProfile",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_cluster",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetCluster"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetClusterRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Cluster",
+ "shortName": "get_cluster"
+ },
+ "description": "Sample for GetCluster",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_cluster",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetCluster"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetClusterRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Cluster",
+ "shortName": "get_cluster"
+ },
+ "description": "Sample for GetCluster",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_iam_policy",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetIamPolicy"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.policy_pb2.Policy",
+ "shortName": "get_iam_policy"
+ },
+ "description": "Sample for GetIamPolicy",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_iam_policy",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetIamPolicy"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.policy_pb2.Policy",
+ "shortName": "get_iam_policy"
+ },
+ "description": "Sample for GetIamPolicy",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_instance",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetInstance"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetInstanceRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Instance",
+ "shortName": "get_instance"
+ },
+ "description": "Sample for GetInstance",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_instance",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetInstance"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetInstanceRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Instance",
+ "shortName": "get_instance"
+ },
+ "description": "Sample for GetInstance",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_logical_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetLogicalView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetLogicalView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.LogicalView",
+ "shortName": "get_logical_view"
+ },
+ "description": "Sample for GetLogicalView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_logical_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetLogicalView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetLogicalView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.LogicalView",
+ "shortName": "get_logical_view"
+ },
+ "description": "Sample for GetLogicalView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_materialized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetMaterializedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetMaterializedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.MaterializedView",
+ "shortName": "get_materialized_view"
+ },
+ "description": "Sample for GetMaterializedView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_materialized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetMaterializedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "GetMaterializedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.MaterializedView",
+ "shortName": "get_materialized_view"
+ },
+ "description": "Sample for GetMaterializedView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_app_profiles",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListAppProfiles"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager",
+ "shortName": "list_app_profiles"
+ },
+ "description": "Sample for ListAppProfiles",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_app_profiles",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListAppProfiles"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager",
+ "shortName": "list_app_profiles"
+ },
+ "description": "Sample for ListAppProfiles",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_clusters",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListClusters"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListClustersRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.ListClustersResponse",
+ "shortName": "list_clusters"
+ },
+ "description": "Sample for ListClusters",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_clusters",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListClusters"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListClustersRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.ListClustersResponse",
+ "shortName": "list_clusters"
+ },
+ "description": "Sample for ListClusters",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_hot_tablets",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListHotTablets",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListHotTablets"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsAsyncPager",
+ "shortName": "list_hot_tablets"
+ },
+ "description": "Sample for ListHotTablets",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_hot_tablets",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListHotTablets",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListHotTablets"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsPager",
+ "shortName": "list_hot_tablets"
+ },
+ "description": "Sample for ListHotTablets",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_instances",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListInstances"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListInstancesRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.ListInstancesResponse",
+ "shortName": "list_instances"
+ },
+ "description": "Sample for ListInstances",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_instances",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListInstances"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListInstancesRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.ListInstancesResponse",
+ "shortName": "list_instances"
+ },
+ "description": "Sample for ListInstances",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_logical_views",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListLogicalViews",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListLogicalViews"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsAsyncPager",
+ "shortName": "list_logical_views"
+ },
+ "description": "Sample for ListLogicalViews",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_logical_views",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListLogicalViews",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListLogicalViews"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsPager",
+ "shortName": "list_logical_views"
+ },
+ "description": "Sample for ListLogicalViews",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_materialized_views",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListMaterializedViews",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListMaterializedViews"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsAsyncPager",
+ "shortName": "list_materialized_views"
+ },
+ "description": "Sample for ListMaterializedViews",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_materialized_views",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListMaterializedViews",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "ListMaterializedViews"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsPager",
+ "shortName": "list_materialized_views"
+ },
+ "description": "Sample for ListMaterializedViews",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.partial_update_cluster",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "PartialUpdateCluster"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest"
+ },
+ {
+ "name": "cluster",
+ "type": "google.cloud.bigtable_admin_v2.types.Cluster"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "partial_update_cluster"
+ },
+ "description": "Sample for PartialUpdateCluster",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.partial_update_cluster",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "PartialUpdateCluster"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest"
+ },
+ {
+ "name": "cluster",
+ "type": "google.cloud.bigtable_admin_v2.types.Cluster"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "partial_update_cluster"
+ },
+ "description": "Sample for PartialUpdateCluster",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.partial_update_instance",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "PartialUpdateInstance"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest"
+ },
+ {
+ "name": "instance",
+ "type": "google.cloud.bigtable_admin_v2.types.Instance"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "partial_update_instance"
+ },
+ "description": "Sample for PartialUpdateInstance",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async",
+ "segments": [
+ {
+ "end": 58,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 58,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 55,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 59,
+ "start": 56,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.partial_update_instance",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "PartialUpdateInstance"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest"
+ },
+ {
+ "name": "instance",
+ "type": "google.cloud.bigtable_admin_v2.types.Instance"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "partial_update_instance"
+ },
+ "description": "Sample for PartialUpdateInstance",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync",
+ "segments": [
+ {
+ "end": 58,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 58,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 55,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 59,
+ "start": 56,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.set_iam_policy",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "SetIamPolicy"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.policy_pb2.Policy",
+ "shortName": "set_iam_policy"
+ },
+ "description": "Sample for SetIamPolicy",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.set_iam_policy",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "SetIamPolicy"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.policy_pb2.Policy",
+ "shortName": "set_iam_policy"
+ },
+ "description": "Sample for SetIamPolicy",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.test_iam_permissions",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "TestIamPermissions"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "permissions",
+ "type": "MutableSequence[str]"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse",
+ "shortName": "test_iam_permissions"
+ },
+ "description": "Sample for TestIamPermissions",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.test_iam_permissions",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "TestIamPermissions"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "permissions",
+ "type": "MutableSequence[str]"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse",
+ "shortName": "test_iam_permissions"
+ },
+ "description": "Sample for TestIamPermissions",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_app_profile",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "UpdateAppProfile"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest"
+ },
+ {
+ "name": "app_profile",
+ "type": "google.cloud.bigtable_admin_v2.types.AppProfile"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "update_app_profile"
+ },
+ "description": "Sample for UpdateAppProfile",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async",
+ "segments": [
+ {
+ "end": 58,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 58,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 55,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 59,
+ "start": 56,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_app_profile",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "UpdateAppProfile"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest"
+ },
+ {
+ "name": "app_profile",
+ "type": "google.cloud.bigtable_admin_v2.types.AppProfile"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "update_app_profile"
+ },
+ "description": "Sample for UpdateAppProfile",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync",
+ "segments": [
+ {
+ "end": 58,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 58,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 55,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 59,
+ "start": 56,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_cluster",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "UpdateCluster"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.Cluster"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "update_cluster"
+ },
+ "description": "Sample for UpdateCluster",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_cluster",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "UpdateCluster"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.Cluster"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "update_cluster"
+ },
+ "description": "Sample for UpdateCluster",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_instance",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "UpdateInstance"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.Instance"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Instance",
+ "shortName": "update_instance"
+ },
+ "description": "Sample for UpdateInstance",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_instance",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "UpdateInstance"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.Instance"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Instance",
+ "shortName": "update_instance"
+ },
+ "description": "Sample for UpdateInstance",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_logical_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateLogicalView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "UpdateLogicalView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest"
+ },
+ {
+ "name": "logical_view",
+ "type": "google.cloud.bigtable_admin_v2.types.LogicalView"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "update_logical_view"
+ },
+ "description": "Sample for UpdateLogicalView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async",
+ "segments": [
+ {
+ "end": 58,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 58,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 55,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 59,
+ "start": 56,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_logical_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateLogicalView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "UpdateLogicalView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest"
+ },
+ {
+ "name": "logical_view",
+ "type": "google.cloud.bigtable_admin_v2.types.LogicalView"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "update_logical_view"
+ },
+ "description": "Sample for UpdateLogicalView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync",
+ "segments": [
+ {
+ "end": 58,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 58,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 55,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 59,
+ "start": 56,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient",
+ "shortName": "BigtableInstanceAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_materialized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateMaterializedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "UpdateMaterializedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest"
+ },
+ {
+ "name": "materialized_view",
+ "type": "google.cloud.bigtable_admin_v2.types.MaterializedView"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "update_materialized_view"
+ },
+ "description": "Sample for UpdateMaterializedView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async",
+ "segments": [
+ {
+ "end": 58,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 58,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 55,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 59,
+ "start": 56,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient",
+ "shortName": "BigtableInstanceAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_materialized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateMaterializedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin",
+ "shortName": "BigtableInstanceAdmin"
+ },
+ "shortName": "UpdateMaterializedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest"
+ },
+ {
+ "name": "materialized_view",
+ "type": "google.cloud.bigtable_admin_v2.types.MaterializedView"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "update_materialized_view"
+ },
+ "description": "Sample for UpdateMaterializedView",
+ "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync",
+ "segments": [
+ {
+ "end": 58,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 58,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 55,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 59,
+ "start": 56,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.check_consistency",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CheckConsistency"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "consistency_token",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse",
+ "shortName": "check_consistency"
+ },
+ "description": "Sample for CheckConsistency",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.check_consistency",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CheckConsistency"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "consistency_token",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse",
+ "shortName": "check_consistency"
+ },
+ "description": "Sample for CheckConsistency",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.copy_backup",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CopyBackup"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CopyBackupRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "backup_id",
+ "type": "str"
+ },
+ {
+ "name": "source_backup",
+ "type": "str"
+ },
+ {
+ "name": "expire_time",
+ "type": "google.protobuf.timestamp_pb2.Timestamp"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "copy_backup"
+ },
+ "description": "Sample for CopyBackup",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.copy_backup",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CopyBackup"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CopyBackupRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "backup_id",
+ "type": "str"
+ },
+ {
+ "name": "source_backup",
+ "type": "str"
+ },
+ {
+ "name": "expire_time",
+ "type": "google.protobuf.timestamp_pb2.Timestamp"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "copy_backup"
+ },
+ "description": "Sample for CopyBackup",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_authorized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CreateAuthorizedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "authorized_view",
+ "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView"
+ },
+ {
+ "name": "authorized_view_id",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "create_authorized_view"
+ },
+ "description": "Sample for CreateAuthorizedView",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_authorized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CreateAuthorizedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "authorized_view",
+ "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView"
+ },
+ {
+ "name": "authorized_view_id",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "create_authorized_view"
+ },
+ "description": "Sample for CreateAuthorizedView",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_backup",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CreateBackup"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateBackupRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "backup_id",
+ "type": "str"
+ },
+ {
+ "name": "backup",
+ "type": "google.cloud.bigtable_admin_v2.types.Backup"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "create_backup"
+ },
+ "description": "Sample for CreateBackup",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async",
+ "segments": [
+ {
+ "end": 60,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 60,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 57,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 61,
+ "start": 58,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_backup",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CreateBackup"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateBackupRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "backup_id",
+ "type": "str"
+ },
+ {
+ "name": "backup",
+ "type": "google.cloud.bigtable_admin_v2.types.Backup"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "create_backup"
+ },
+ "description": "Sample for CreateBackup",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync",
+ "segments": [
+ {
+ "end": 60,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 60,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 57,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 61,
+ "start": 58,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_schema_bundle",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CreateSchemaBundle"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "schema_bundle_id",
+ "type": "str"
+ },
+ {
+ "name": "schema_bundle",
+ "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "create_schema_bundle"
+ },
+ "description": "Sample for CreateSchemaBundle",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async",
+ "segments": [
+ {
+ "end": 60,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 60,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 57,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 61,
+ "start": 58,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_schema_bundle",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CreateSchemaBundle"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "schema_bundle_id",
+ "type": "str"
+ },
+ {
+ "name": "schema_bundle",
+ "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "create_schema_bundle"
+ },
+ "description": "Sample for CreateSchemaBundle",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync",
+ "segments": [
+ {
+ "end": 60,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 60,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 57,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 61,
+ "start": 58,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_table_from_snapshot",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CreateTableFromSnapshot"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "table_id",
+ "type": "str"
+ },
+ {
+ "name": "source_snapshot",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "create_table_from_snapshot"
+ },
+ "description": "Sample for CreateTableFromSnapshot",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_table_from_snapshot",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CreateTableFromSnapshot"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "table_id",
+ "type": "str"
+ },
+ {
+ "name": "source_snapshot",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "create_table_from_snapshot"
+ },
+ "description": "Sample for CreateTableFromSnapshot",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CreateTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateTableRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "table_id",
+ "type": "str"
+ },
+ {
+ "name": "table",
+ "type": "google.cloud.bigtable_admin_v2.types.Table"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Table",
+ "shortName": "create_table"
+ },
+ "description": "Sample for CreateTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "CreateTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.CreateTableRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "table_id",
+ "type": "str"
+ },
+ {
+ "name": "table",
+ "type": "google.cloud.bigtable_admin_v2.types.Table"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Table",
+ "shortName": "create_table"
+ },
+ "description": "Sample for CreateTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_authorized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DeleteAuthorizedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_authorized_view"
+ },
+ "description": "Sample for DeleteAuthorizedView",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_authorized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DeleteAuthorizedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_authorized_view"
+ },
+ "description": "Sample for DeleteAuthorizedView",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_backup",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DeleteBackup"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteBackupRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_backup"
+ },
+ "description": "Sample for DeleteBackup",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_backup",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DeleteBackup"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteBackupRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_backup"
+ },
+ "description": "Sample for DeleteBackup",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_schema_bundle",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DeleteSchemaBundle"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_schema_bundle"
+ },
+ "description": "Sample for DeleteSchemaBundle",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_schema_bundle",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DeleteSchemaBundle"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_schema_bundle"
+ },
+ "description": "Sample for DeleteSchemaBundle",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_snapshot",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DeleteSnapshot"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_snapshot"
+ },
+ "description": "Sample for DeleteSnapshot",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_snapshot",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DeleteSnapshot"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_snapshot"
+ },
+ "description": "Sample for DeleteSnapshot",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DeleteTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteTableRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_table"
+ },
+ "description": "Sample for DeleteTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DeleteTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DeleteTableRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "delete_table"
+ },
+ "description": "Sample for DeleteTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.drop_row_range",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DropRowRange"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DropRowRangeRequest"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "drop_row_range"
+ },
+ "description": "Sample for DropRowRange",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.drop_row_range",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "DropRowRange"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.DropRowRangeRequest"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "shortName": "drop_row_range"
+ },
+ "description": "Sample for DropRowRange",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.generate_consistency_token",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GenerateConsistencyToken"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse",
+ "shortName": "generate_consistency_token"
+ },
+ "description": "Sample for GenerateConsistencyToken",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.generate_consistency_token",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GenerateConsistencyToken"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse",
+ "shortName": "generate_consistency_token"
+ },
+ "description": "Sample for GenerateConsistencyToken",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_authorized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetAuthorizedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.AuthorizedView",
+ "shortName": "get_authorized_view"
+ },
+ "description": "Sample for GetAuthorizedView",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_authorized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetAuthorizedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.AuthorizedView",
+ "shortName": "get_authorized_view"
+ },
+ "description": "Sample for GetAuthorizedView",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_backup",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetBackup",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetBackup"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetBackupRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Backup",
+ "shortName": "get_backup"
+ },
+ "description": "Sample for GetBackup",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_backup",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetBackup",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetBackup"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetBackupRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Backup",
+ "shortName": "get_backup"
+ },
+ "description": "Sample for GetBackup",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_iam_policy",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetIamPolicy"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.policy_pb2.Policy",
+ "shortName": "get_iam_policy"
+ },
+ "description": "Sample for GetIamPolicy",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_iam_policy",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetIamPolicy"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.policy_pb2.Policy",
+ "shortName": "get_iam_policy"
+ },
+ "description": "Sample for GetIamPolicy",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_schema_bundle",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetSchemaBundle"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.SchemaBundle",
+ "shortName": "get_schema_bundle"
+ },
+ "description": "Sample for GetSchemaBundle",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_schema_bundle",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetSchemaBundle"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.SchemaBundle",
+ "shortName": "get_schema_bundle"
+ },
+ "description": "Sample for GetSchemaBundle",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_snapshot",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetSnapshot"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetSnapshotRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Snapshot",
+ "shortName": "get_snapshot"
+ },
+ "description": "Sample for GetSnapshot",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_snapshot",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetSnapshot"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetSnapshotRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Snapshot",
+ "shortName": "get_snapshot"
+ },
+ "description": "Sample for GetSnapshot",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetTableRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Table",
+ "shortName": "get_table"
+ },
+ "description": "Sample for GetTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "GetTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.GetTableRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Table",
+ "shortName": "get_table"
+ },
+ "description": "Sample for GetTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_authorized_views",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ListAuthorizedViews"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsAsyncPager",
+ "shortName": "list_authorized_views"
+ },
+ "description": "Sample for ListAuthorizedViews",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_authorized_views",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ListAuthorizedViews"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsPager",
+ "shortName": "list_authorized_views"
+ },
+ "description": "Sample for ListAuthorizedViews",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_backups",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListBackups",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ListBackups"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListBackupsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager",
+ "shortName": "list_backups"
+ },
+ "description": "Sample for ListBackups",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_backups",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListBackups",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ListBackups"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListBackupsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager",
+ "shortName": "list_backups"
+ },
+ "description": "Sample for ListBackups",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_schema_bundles",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ListSchemaBundles"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesAsyncPager",
+ "shortName": "list_schema_bundles"
+ },
+ "description": "Sample for ListSchemaBundles",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_schema_bundles",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ListSchemaBundles"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesPager",
+ "shortName": "list_schema_bundles"
+ },
+ "description": "Sample for ListSchemaBundles",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_snapshots",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ListSnapshots"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager",
+ "shortName": "list_snapshots"
+ },
+ "description": "Sample for ListSnapshots",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_snapshots",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ListSnapshots"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager",
+ "shortName": "list_snapshots"
+ },
+ "description": "Sample for ListSnapshots",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_tables",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListTables",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ListTables"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListTablesRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager",
+ "shortName": "list_tables"
+ },
+ "description": "Sample for ListTables",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_tables",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListTables",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ListTables"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ListTablesRequest"
+ },
+ {
+ "name": "parent",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager",
+ "shortName": "list_tables"
+ },
+ "description": "Sample for ListTables",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.modify_column_families",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ModifyColumnFamilies"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "modifications",
+ "type": "MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Table",
+ "shortName": "modify_column_families"
+ },
+ "description": "Sample for ModifyColumnFamilies",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.modify_column_families",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "ModifyColumnFamilies"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "modifications",
+ "type": "MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Table",
+ "shortName": "modify_column_families"
+ },
+ "description": "Sample for ModifyColumnFamilies",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._restore_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "RestoreTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.RestoreTableRequest"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "_restore_table"
+ },
+ "description": "Sample for RestoreTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._restore_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "RestoreTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.RestoreTableRequest"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "_restore_table"
+ },
+ "description": "Sample for RestoreTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.set_iam_policy",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "SetIamPolicy"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.policy_pb2.Policy",
+ "shortName": "set_iam_policy"
+ },
+ "description": "Sample for SetIamPolicy",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.set_iam_policy",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "SetIamPolicy"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.policy_pb2.Policy",
+ "shortName": "set_iam_policy"
+ },
+ "description": "Sample for SetIamPolicy",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.snapshot_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "SnapshotTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.SnapshotTableRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "cluster",
+ "type": "str"
+ },
+ {
+ "name": "snapshot_id",
+ "type": "str"
+ },
+ {
+ "name": "description",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "snapshot_table"
+ },
+ "description": "Sample for SnapshotTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.snapshot_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "SnapshotTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.SnapshotTableRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "cluster",
+ "type": "str"
+ },
+ {
+ "name": "snapshot_id",
+ "type": "str"
+ },
+ {
+ "name": "description",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "snapshot_table"
+ },
+ "description": "Sample for SnapshotTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.test_iam_permissions",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "TestIamPermissions"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "permissions",
+ "type": "MutableSequence[str]"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse",
+ "shortName": "test_iam_permissions"
+ },
+ "description": "Sample for TestIamPermissions",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.test_iam_permissions",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "TestIamPermissions"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest"
+ },
+ {
+ "name": "resource",
+ "type": "str"
+ },
+ {
+ "name": "permissions",
+ "type": "MutableSequence[str]"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse",
+ "shortName": "test_iam_permissions"
+ },
+ "description": "Sample for TestIamPermissions",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 42,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.undelete_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "UndeleteTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UndeleteTableRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "undelete_table"
+ },
+ "description": "Sample for UndeleteTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async",
+ "segments": [
+ {
+ "end": 55,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 55,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 52,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 56,
+ "start": 53,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.undelete_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "UndeleteTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UndeleteTableRequest"
+ },
+ {
+ "name": "name",
+ "type": "str"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "undelete_table"
+ },
+ "description": "Sample for UndeleteTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync",
+ "segments": [
+ {
+ "end": 55,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 55,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 52,
+ "start": 46,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 56,
+ "start": 53,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_authorized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "UpdateAuthorizedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest"
+ },
+ {
+ "name": "authorized_view",
+ "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "update_authorized_view"
+ },
+ "description": "Sample for UpdateAuthorizedView",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_authorized_view",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "UpdateAuthorizedView"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest"
+ },
+ {
+ "name": "authorized_view",
+ "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "update_authorized_view"
+ },
+ "description": "Sample for UpdateAuthorizedView",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_backup",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "UpdateBackup"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateBackupRequest"
+ },
+ {
+ "name": "backup",
+ "type": "google.cloud.bigtable_admin_v2.types.Backup"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Backup",
+ "shortName": "update_backup"
+ },
+ "description": "Sample for UpdateBackup",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_backup",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "UpdateBackup"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateBackupRequest"
+ },
+ {
+ "name": "backup",
+ "type": "google.cloud.bigtable_admin_v2.types.Backup"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.cloud.bigtable_admin_v2.types.Backup",
+ "shortName": "update_backup"
+ },
+ "description": "Sample for UpdateBackup",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_schema_bundle",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "UpdateSchemaBundle"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest"
+ },
+ {
+ "name": "schema_bundle",
+ "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "update_schema_bundle"
+ },
+ "description": "Sample for UpdateSchemaBundle",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async",
+ "segments": [
+ {
+ "end": 58,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 58,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 55,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 59,
+ "start": 56,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_schema_bundle",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "UpdateSchemaBundle"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest"
+ },
+ {
+ "name": "schema_bundle",
+ "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "update_schema_bundle"
+ },
+ "description": "Sample for UpdateSchemaBundle",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync",
+ "segments": [
+ {
+ "end": 58,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 58,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 55,
+ "start": 49,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 59,
+ "start": 56,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "async": true,
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient",
+ "shortName": "BaseBigtableTableAdminAsyncClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "UpdateTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateTableRequest"
+ },
+ {
+ "name": "table",
+ "type": "google.cloud.bigtable_admin_v2.types.Table"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation_async.AsyncOperation",
+ "shortName": "update_table"
+ },
+ "description": "Sample for UpdateTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py"
+ },
+ {
+ "canonical": true,
+ "clientMethod": {
+ "client": {
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient",
+ "shortName": "BaseBigtableTableAdminClient"
+ },
+ "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_table",
+ "method": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable",
+ "service": {
+ "fullName": "google.bigtable.admin.v2.BigtableTableAdmin",
+ "shortName": "BigtableTableAdmin"
+ },
+ "shortName": "UpdateTable"
+ },
+ "parameters": [
+ {
+ "name": "request",
+ "type": "google.cloud.bigtable_admin_v2.types.UpdateTableRequest"
+ },
+ {
+ "name": "table",
+ "type": "google.cloud.bigtable_admin_v2.types.Table"
+ },
+ {
+ "name": "update_mask",
+ "type": "google.protobuf.field_mask_pb2.FieldMask"
+ },
+ {
+ "name": "retry",
+ "type": "google.api_core.retry.Retry"
+ },
+ {
+ "name": "timeout",
+ "type": "float"
+ },
+ {
+ "name": "metadata",
+ "type": "Sequence[Tuple[str, Union[str, bytes]]]"
+ }
+ ],
+ "resultType": "google.api_core.operation.Operation",
+ "shortName": "update_table"
+ },
+ "description": "Sample for UpdateTable",
+ "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py",
+ "language": "PYTHON",
+ "origin": "API_DEFINITION",
+ "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ],
+ "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py"
+ }
+ ]
+}
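Each entry in the generated metadata above ties a sample file (`file`, `regionTag`) to line-number `segments` (FULL, SHORT, CLIENT_INITIALIZATION, REQUEST_INITIALIZATION, REQUEST_EXECUTION, RESPONSE_HANDLING). A minimal sketch of how a tool could use that metadata to pull one segment out of a sample; the metadata filename and working directory are placeholders for illustration, and `start`/`end` are treated as 1-based, inclusive line numbers:

```python
# Hedged sketch: extract one segment of a generated sample using the
# snippet metadata. "snippet_metadata.json" is a placeholder path.
import json


def extract_segment(metadata_path, region_tag, segment_type):
    with open(metadata_path) as f:
        metadata = json.load(f)
    for snippet in metadata["snippets"]:
        if snippet["regionTag"] != region_tag:
            continue
        segment = next(s for s in snippet["segments"] if s["type"] == segment_type)
        with open(snippet["file"]) as sample:
            lines = sample.readlines()
        # Treat start/end as 1-based, inclusive line numbers.
        return "".join(lines[segment["start"] - 1 : segment["end"]])
    return None


print(
    extract_segment(
        "snippet_metadata.json",  # placeholder path
        "bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync",
        "REQUEST_INITIALIZATION",
    )
)
```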
diff --git a/samples/hello/async_main.py b/samples/hello/async_main.py
index 34159bedb..e134e28d0 100644
--- a/samples/hello/async_main.py
+++ b/samples/hello/async_main.py
@@ -57,7 +57,7 @@ async def main(project_id, instance_id, table_id):
# Create a column family with GC policy : most recent N versions
# Define the GC policy to retain only the most recent 2 versions
max_versions_rule = column_family.MaxVersionsGCRule(2)
- column_family_id = "cf1"
+ column_family_id = b"cf1"
column_families = {column_family_id: max_versions_rule}
if not admin_table.exists():
admin_table.create(column_families=column_families)
@@ -70,9 +70,9 @@ async def main(project_id, instance_id, table_id):
wait_for_table(admin_table)
# [START bigtable_async_hw_write_rows]
print("Writing some greetings to the table.")
- greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"]
+ greetings = [b"Hello World!", b"Hello Cloud Bigtable!", b"Hello Python!"]
mutations = []
- column = "greeting"
+ column = b"greeting"
for i, value in enumerate(greetings):
# Note: This example uses sequential numeric IDs for simplicity,
# but this can result in poor performance in a production
@@ -80,11 +80,14 @@ async def main(project_id, instance_id, table_id):
# sequential keys can result in poor distribution of operations
# across nodes.
#
+ # We recommend that you use bytestrings directly for row keys
+ # where possible, rather than encoding strings.
+ #
# For more information about how to design a Bigtable schema for
# the best performance, see the documentation:
#
# https://cloud.google.com/bigtable/docs/schema-design
- row_key = "greeting{}".format(i).encode()
+ row_key = f"greeting{i}".encode()
row_mutation = bigtable.data.RowMutationEntry(
row_key, bigtable.data.SetCell(column_family_id, column, value)
)
diff --git a/samples/hello/main.py b/samples/hello/main.py
index 41124e826..2c0d83f98 100644
--- a/samples/hello/main.py
+++ b/samples/hello/main.py
@@ -28,7 +28,7 @@
from ..utils import wait_for_table
# [START bigtable_hw_imports]
-import datetime
+from datetime import datetime, timezone
from google.cloud import bigtable
from google.cloud.bigtable import column_family
@@ -57,7 +57,7 @@ def main(project_id, instance_id, table_id):
# Create a column family with GC policy : most recent N versions
# Define the GC policy to retain only the most recent 2 versions
max_versions_rule = bigtable.column_family.MaxVersionsGCRule(2)
- column_family_id = "cf1"
+ column_family_id = b"cf1"
column_families = {column_family_id: max_versions_rule}
if not table.exists():
table.create(column_families=column_families)
@@ -71,9 +71,9 @@ def main(project_id, instance_id, table_id):
# [START bigtable_hw_write_rows]
print("Writing some greetings to the table.")
- greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"]
+ greetings = [b"Hello World!", b"Hello Cloud Bigtable!", b"Hello Python!"]
rows = []
- column = "greeting".encode()
+ column = b"greeting"
for i, value in enumerate(greetings):
# Note: This example uses sequential numeric IDs for simplicity,
# but this can result in poor performance in a production
@@ -81,14 +81,17 @@ def main(project_id, instance_id, table_id):
# sequential keys can result in poor distribution of operations
# across nodes.
#
+ # We recommend that you use bytestrings directly for row keys
+ # where possible, rather than encoding strings.
+ #
# For more information about how to design a Bigtable schema for
# the best performance, see the documentation:
#
# https://cloud.google.com/bigtable/docs/schema-design
- row_key = "greeting{}".format(i).encode()
+ row_key = f"greeting{i}".encode()
row = table.direct_row(row_key)
row.set_cell(
- column_family_id, column, value, timestamp=datetime.datetime.utcnow()
+ column_family_id, column, value, timestamp=datetime.now(timezone.utc),
)
rows.append(row)
table.mutate_rows(rows)
@@ -103,10 +106,10 @@ def main(project_id, instance_id, table_id):
# [START bigtable_hw_get_with_filter]
# [START bigtable_hw_get_by_key]
print("Getting a single greeting by row key.")
- key = "greeting0".encode()
+ key = b"greeting0"
row = table.read_row(key, row_filter)
- cell = row.cells[column_family_id][column][0]
+ cell = row.cells[column_family_id.decode("utf-8")][column][0]
print(cell.value.decode("utf-8"))
# [END bigtable_hw_get_by_key]
# [END bigtable_hw_get_with_filter]
@@ -117,7 +120,8 @@ def main(project_id, instance_id, table_id):
partial_rows = table.read_rows(filter_=row_filter)
for row in partial_rows:
- cell = row.cells[column_family_id][column][0]
+ column_family_id_str = column_family_id.decode("utf-8")
+ cell = row.cells[column_family_id_str][column][0]
print(cell.value.decode("utf-8"))
# [END bigtable_hw_scan_all]
# [END bigtable_hw_scan_with_filter]
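The `.decode("utf-8")` calls added above reflect how the legacy client structures `row.cells`: the outer mapping is keyed by the column family name as `str`, and the inner mapping by the column qualifier as `bytes`. A small sketch, reusing the `cf1`/`greeting` names from the sample above:

```python
# Sketch of navigating PartialRowData.cells in the legacy client.
# `row` is a row returned by table.read_row(...) or table.read_rows(...).
def first_greeting(row):
    family_cells = row.cells["cf1"]             # family names are str keys
    greeting_cells = family_cells[b"greeting"]  # qualifiers are bytes keys
    return greeting_cells[0].value.decode("utf-8")
```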
diff --git a/samples/hello/requirements.txt b/samples/hello/requirements.txt
index 55d3a1ddd..5113ca7f1 100644
--- a/samples/hello/requirements.txt
+++ b/samples/hello/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.30.1
-google-cloud-core==2.4.3
+google-cloud-bigtable==2.35.0
+google-cloud-core==2.5.0
diff --git a/samples/instanceadmin/requirements.txt b/samples/instanceadmin/requirements.txt
index a2922fe6e..67a1ea5b8 100644
--- a/samples/instanceadmin/requirements.txt
+++ b/samples/instanceadmin/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.35.0
backoff==2.2.1
diff --git a/samples/metricscaler/requirements.txt b/samples/metricscaler/requirements.txt
index 522c28ae2..257fd1ef6 100644
--- a/samples/metricscaler/requirements.txt
+++ b/samples/metricscaler/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.30.1
-google-cloud-monitoring==2.27.1
+google-cloud-bigtable==2.35.0
+google-cloud-monitoring==2.29.0
diff --git a/samples/quickstart/requirements.txt b/samples/quickstart/requirements.txt
index 807132c7e..730d25dec 100644
--- a/samples/quickstart/requirements.txt
+++ b/samples/quickstart/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.35.0
diff --git a/samples/snippets/data_client/data_client_snippets_async.py b/samples/snippets/data_client/data_client_snippets_async.py
index dabbcb839..332dbd56f 100644
--- a/samples/snippets/data_client/data_client_snippets_async.py
+++ b/samples/snippets/data_client/data_client_snippets_async.py
@@ -136,6 +136,47 @@ async def write_conditional(project_id, instance_id, table_id):
await write_conditional(table.client.project, table.instance_id, table.table_id)
+async def write_aggregate(table):
+ # [START bigtable_async_write_aggregate]
+ import time
+ from google.cloud.bigtable.data import BigtableDataClientAsync
+ from google.cloud.bigtable.data.mutations import AddToCell, RowMutationEntry
+ from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup
+
+ async def write_aggregate(project_id, instance_id, table_id):
+ """Increments a value in a Bigtable table using AddToCell mutation."""
+ async with BigtableDataClientAsync(project=project_id) as client:
+ table = client.get_table(instance_id, table_id)
+ row_key = "unique_device_ids_1"
+ try:
+ async with table.mutations_batcher() as batcher:
+ # The AddToCell mutation increments the value of a cell.
+ # The `counters` family must be set up to be an aggregate
+ # family with an int64 input type.
+ reading = AddToCell(
+ family="counters",
+ qualifier="odometer",
+ value=32304,
+ # Convert nanoseconds to microseconds
+ timestamp_micros=time.time_ns() // 1000,
+ )
+ await batcher.append(
+ RowMutationEntry(row_key.encode("utf-8"), [reading])
+ )
+ except MutationsExceptionGroup as e:
+ # MutationsExceptionGroup contains a FailedMutationEntryError for
+ # each mutation that failed.
+ for sub_exception in e.exceptions:
+ failed_entry: RowMutationEntry = sub_exception.entry
+ cause: Exception = sub_exception.__cause__
+ print(
+ f"Failed mutation for row {failed_entry.row_key!r} with error: {cause!r}"
+ )
+
+ # [END bigtable_async_write_aggregate]
+ await write_aggregate(table.client.project, table.instance_id, table.table_id)
+
+
async def read_row(table):
# [START bigtable_async_reads_row]
from google.cloud.bigtable.data import BigtableDataClientAsync
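The new `write_aggregate` snippet above notes that the target family must be an aggregate family; the server then keeps a running sum that can be read back like any other cell. A hedged sketch of reading that sum with the async data client, assuming the `counters`/`odometer` family and qualifier from the snippet and an int64 big-endian aggregate family (as configured in the test fixture below):

```python
# Minimal sketch: read back a counter written with AddToCell.
from google.cloud.bigtable.data import BigtableDataClientAsync


async def read_counter(project_id, instance_id, table_id, row_key):
    async with BigtableDataClientAsync(project=project_id) as client:
        table = client.get_table(instance_id, table_id)
        row = await table.read_row(row_key)
        if row is None:
            return 0
        cell = row.get_cells(family="counters", qualifier="odometer")[0]
        # The aggregate family stores the running sum as an 8-byte
        # big-endian signed integer.
        return int.from_bytes(cell.value, byteorder="big", signed=True)
```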
diff --git a/samples/snippets/data_client/data_client_snippets_async_test.py b/samples/snippets/data_client/data_client_snippets_async_test.py
index 8dfff50d1..2761bd487 100644
--- a/samples/snippets/data_client/data_client_snippets_async_test.py
+++ b/samples/snippets/data_client/data_client_snippets_async_test.py
@@ -25,8 +25,26 @@
@pytest.fixture(scope="session")
-def table_id():
- with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"family": None, "stats_summary": None}):
+def column_family_config():
+ from google.cloud.bigtable_admin_v2 import types
+
+ int_aggregate_type = types.Type.Aggregate(
+ input_type=types.Type(int64_type={"encoding": {"big_endian_bytes": {}}}),
+ sum={},
+ )
+
+ return {
+ "family": types.ColumnFamily(),
+ "stats_summary": types.ColumnFamily(),
+ "counters": types.ColumnFamily(
+ value_type=types.Type(aggregate_type=int_aggregate_type)
+ ),
+ }
+
+
+@pytest.fixture(scope="session")
+def table_id(column_family_config):
+ with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, column_family_config):
yield TABLE_ID
@@ -59,6 +77,11 @@ async def test_write_conditional(table):
await data_snippets.write_conditional(table)
+@pytest.mark.asyncio
+async def test_write_aggregate(table):
+ await data_snippets.write_aggregate(table)
+
+
@pytest.mark.asyncio
async def test_read_row(table):
await data_snippets.read_row(table)
diff --git a/samples/snippets/data_client/requirements.txt b/samples/snippets/data_client/requirements.txt
index 807132c7e..730d25dec 100644
--- a/samples/snippets/data_client/requirements.txt
+++ b/samples/snippets/data_client/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.35.0
diff --git a/samples/snippets/deletes/requirements.txt b/samples/snippets/deletes/requirements.txt
index 807132c7e..730d25dec 100644
--- a/samples/snippets/deletes/requirements.txt
+++ b/samples/snippets/deletes/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.35.0
diff --git a/samples/snippets/filters/requirements.txt b/samples/snippets/filters/requirements.txt
index 807132c7e..730d25dec 100644
--- a/samples/snippets/filters/requirements.txt
+++ b/samples/snippets/filters/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.35.0
diff --git a/samples/snippets/reads/requirements.txt b/samples/snippets/reads/requirements.txt
index 807132c7e..730d25dec 100644
--- a/samples/snippets/reads/requirements.txt
+++ b/samples/snippets/reads/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.35.0
diff --git a/samples/snippets/writes/requirements.txt b/samples/snippets/writes/requirements.txt
index 874788bf7..54c0c14a3 100644
--- a/samples/snippets/writes/requirements.txt
+++ b/samples/snippets/writes/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
\ No newline at end of file
+google-cloud-bigtable==2.35.0
\ No newline at end of file
diff --git a/samples/snippets/writes/write_batch.py b/samples/snippets/writes/write_batch.py
index 8ad4b07a5..a583bb713 100644
--- a/samples/snippets/writes/write_batch.py
+++ b/samples/snippets/writes/write_batch.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# [START bigtable_writes_batch]
-import datetime
+from datetime import datetime, timezone
from google.cloud import bigtable
from google.cloud.bigtable.batcher import MutationsBatcher
@@ -25,7 +25,7 @@ def write_batch(project_id, instance_id, table_id):
table = instance.table(table_id)
with MutationsBatcher(table=table) as batcher:
- timestamp = datetime.datetime.utcnow()
+ timestamp = datetime.now(timezone.utc)
column_family_id = "stats_summary"
rows = [
diff --git a/samples/snippets/writes/write_conditionally.py b/samples/snippets/writes/write_conditionally.py
index 7fb640aad..b6f05fba7 100644
--- a/samples/snippets/writes/write_conditionally.py
+++ b/samples/snippets/writes/write_conditionally.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# [START bigtable_writes_conditional]
-import datetime
+from datetime import datetime, timezone
from google.cloud import bigtable
from google.cloud.bigtable import row_filters
@@ -24,7 +24,7 @@ def write_conditional(project_id, instance_id, table_id):
instance = client.instance(instance_id)
table = instance.table(table_id)
- timestamp = datetime.datetime.utcnow()
+ timestamp = datetime.now(timezone.utc)
column_family_id = "stats_summary"
row_key = "phone#4c410523#20190501"
diff --git a/samples/snippets/writes/write_simple.py b/samples/snippets/writes/write_simple.py
index 1aa5a810f..fb7074bc5 100644
--- a/samples/snippets/writes/write_simple.py
+++ b/samples/snippets/writes/write_simple.py
@@ -14,7 +14,7 @@
# limitations under the License.
# [START bigtable_writes_simple]
-import datetime
+from datetime import datetime, timezone
from google.cloud import bigtable
@@ -24,7 +24,7 @@ def write_simple(project_id, instance_id, table_id):
instance = client.instance(instance_id)
table = instance.table(table_id)
- timestamp = datetime.datetime.utcnow()
+ timestamp = datetime.now(timezone.utc)
column_family_id = "stats_summary"
row_key = "phone#4c410523#20190501"
diff --git a/samples/tableadmin/requirements-test.txt b/samples/tableadmin/requirements-test.txt
index d8889022d..f01fd134c 100644
--- a/samples/tableadmin/requirements-test.txt
+++ b/samples/tableadmin/requirements-test.txt
@@ -1,2 +1,2 @@
pytest
-google-cloud-testutils==1.6.4
+google-cloud-testutils==1.7.0
diff --git a/samples/tableadmin/requirements.txt b/samples/tableadmin/requirements.txt
index 807132c7e..730d25dec 100644
--- a/samples/tableadmin/requirements.txt
+++ b/samples/tableadmin/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.30.1
+google-cloud-bigtable==2.35.0
diff --git a/samples/testdata/README.md b/samples/testdata/README.md
new file mode 100644
index 000000000..57520179f
--- /dev/null
+++ b/samples/testdata/README.md
@@ -0,0 +1,5 @@
+#### To generate singer_pb2.py and the descriptors.pb file from singer.proto using `protoc`
+```shell
+cd samples
+protoc --proto_path=testdata/ --include_imports --descriptor_set_out=testdata/descriptors.pb --python_out=testdata/ testdata/singer.proto
+```
\ No newline at end of file
diff --git a/samples/testdata/descriptors.pb b/samples/testdata/descriptors.pb
new file mode 100644
index 000000000..bddf04de3
Binary files /dev/null and b/samples/testdata/descriptors.pb differ
diff --git a/samples/testdata/singer.proto b/samples/testdata/singer.proto
new file mode 100644
index 000000000..d60e0dfb3
--- /dev/null
+++ b/samples/testdata/singer.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+
+package examples.bigtable.music;
+
+enum Genre {
+ POP = 0;
+ JAZZ = 1;
+ FOLK = 2;
+ ROCK = 3;
+}
+
+message Singer {
+ string name = 1;
+ Genre genre = 2;
+}
diff --git a/samples/testdata/singer_pb2.py b/samples/testdata/singer_pb2.py
new file mode 100644
index 000000000..d2a328df0
--- /dev/null
+++ b/samples/testdata/singer_pb2.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: singer.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0csinger.proto\x12\x17\x65xamples.bigtable.music\"E\n\x06Singer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x05genre\x18\x02 \x01(\x0e\x32\x1e.examples.bigtable.music.Genre*.\n\x05Genre\x12\x07\n\x03POP\x10\x00\x12\x08\n\x04JAZZ\x10\x01\x12\x08\n\x04\x46OLK\x10\x02\x12\x08\n\x04ROCK\x10\x03\x62\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'singer_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+ DESCRIPTOR._options = None
+ _GENRE._serialized_start=112
+ _GENRE._serialized_end=158
+ _SINGER._serialized_start=41
+ _SINGER._serialized_end=110
+# @@protoc_insertion_point(module_scope)
diff --git a/samples/utils.py b/samples/utils.py
index eb0ca68f9..f796aaedb 100644
--- a/samples/utils.py
+++ b/samples/utils.py
@@ -16,6 +16,8 @@
from google.cloud import bigtable
+from google.cloud.bigtable.column_family import ColumnFamily
+from google.cloud.bigtable_admin_v2.types import ColumnFamily as ColumnFamily_pb
from google.api_core import exceptions
from google.api_core.retry import Retry
from google.api_core.retry import if_exception_type
@@ -59,10 +61,20 @@ def create_table(project, instance_id, table_id, column_families={}):
if table.exists():
table.delete()
- kwargs = {}
- if column_families:
- kwargs["column_families"] = column_families
- table.create(**kwargs)
+ # convert column families to pb if needed
+ pb_families = {
+ id: ColumnFamily(id, table, rule).to_pb() if not isinstance(rule, ColumnFamily_pb) else rule
+ for (id, rule) in column_families.items()
+ }
+
+ # create table using gapic layer
+ instance._client.table_admin_client.create_table(
+ request={
+ "parent": instance.name,
+ "table_id": table_id,
+ "table": {"column_families": pb_families},
+ }
+ )
wait_for_table(table)
diff --git a/scripts/fixup_bigtable_admin_v2_keywords.py b/scripts/fixup_admin_v2_keywords.py
similarity index 98%
rename from scripts/fixup_bigtable_admin_v2_keywords.py
rename to scripts/fixup_admin_v2_keywords.py
index 352e63a93..d287df24f 100644
--- a/scripts/fixup_bigtable_admin_v2_keywords.py
+++ b/scripts/fixup_admin_v2_keywords.py
@@ -36,7 +36,7 @@ def partition(
return results[1], results[0]
-class bigtable_adminCallTransformer(cst.CSTTransformer):
+class adminCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'check_consistency': ('name', 'consistency_token', 'standard_read_remote_writes', 'data_boost_read_local_writes', ),
@@ -145,7 +145,7 @@ def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
- transformer=bigtable_adminCallTransformer(),
+ transformer=adminCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
@@ -178,7 +178,7 @@ def fix_files(
if __name__ == '__main__':
parser = argparse.ArgumentParser(
- description="""Fix up source that uses the bigtable_admin client library.
+ description="""Fix up source that uses the admin client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
diff --git a/scripts/fixup_bigtable_v2_keywords.py b/scripts/fixup_bigtable_v2_keywords.py
deleted file mode 100644
index 70e0795e2..000000000
--- a/scripts/fixup_bigtable_v2_keywords.py
+++ /dev/null
@@ -1,186 +0,0 @@
-#! /usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import argparse
-import os
-import libcst as cst
-import pathlib
-import sys
-from typing import (Any, Callable, Dict, List, Sequence, Tuple)
-
-
-def partition(
- predicate: Callable[[Any], bool],
- iterator: Sequence[Any]
-) -> Tuple[List[Any], List[Any]]:
- """A stable, out-of-place partition."""
- results = ([], [])
-
- for i in iterator:
- results[int(predicate(i))].append(i)
-
- # Returns trueList, falseList
- return results[1], results[0]
-
-
-class bigtableCallTransformer(cst.CSTTransformer):
- CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
- METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
- 'check_and_mutate_row': ('row_key', 'table_name', 'authorized_view_name', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ),
- 'execute_query': ('instance_name', 'query', 'params', 'app_profile_id', 'prepared_query', 'proto_format', 'resume_token', ),
- 'generate_initial_change_stream_partitions': ('table_name', 'app_profile_id', ),
- 'mutate_row': ('row_key', 'mutations', 'table_name', 'authorized_view_name', 'app_profile_id', ),
- 'mutate_rows': ('entries', 'table_name', 'authorized_view_name', 'app_profile_id', ),
- 'ping_and_warm': ('name', 'app_profile_id', ),
- 'prepare_query': ('instance_name', 'query', 'param_types', 'app_profile_id', 'proto_format', ),
- 'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ),
- 'read_modify_write_row': ('row_key', 'rules', 'table_name', 'authorized_view_name', 'app_profile_id', ),
- 'read_rows': ('table_name', 'authorized_view_name', 'materialized_view_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ),
- 'sample_row_keys': ('table_name', 'authorized_view_name', 'materialized_view_name', 'app_profile_id', ),
- }
-
- def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
- try:
- key = original.func.attr.value
- kword_params = self.METHOD_TO_PARAMS[key]
- except (AttributeError, KeyError):
- # Either not a method from the API or too convoluted to be sure.
- return updated
-
- # If the existing code is valid, keyword args come after positional args.
- # Therefore, all positional args must map to the first parameters.
- args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
- if any(k.keyword.value == "request" for k in kwargs):
- # We've already fixed this file, don't fix it again.
- return updated
-
- kwargs, ctrl_kwargs = partition(
- lambda a: a.keyword.value not in self.CTRL_PARAMS,
- kwargs
- )
-
- args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
- ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
- for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
-
- request_arg = cst.Arg(
- value=cst.Dict([
- cst.DictElement(
- cst.SimpleString("'{}'".format(name)),
-cst.Element(value=arg.value)
- )
- # Note: the args + kwargs looks silly, but keep in mind that
- # the control parameters had to be stripped out, and that
- # those could have been passed positionally or by keyword.
- for name, arg in zip(kword_params, args + kwargs)]),
- keyword=cst.Name("request")
- )
-
- return updated.with_changes(
- args=[request_arg] + ctrl_kwargs
- )
-
-
-def fix_files(
- in_dir: pathlib.Path,
- out_dir: pathlib.Path,
- *,
- transformer=bigtableCallTransformer(),
-):
- """Duplicate the input dir to the output dir, fixing file method calls.
-
- Preconditions:
- * in_dir is a real directory
- * out_dir is a real, empty directory
- """
- pyfile_gen = (
- pathlib.Path(os.path.join(root, f))
- for root, _, files in os.walk(in_dir)
- for f in files if os.path.splitext(f)[1] == ".py"
- )
-
- for fpath in pyfile_gen:
- with open(fpath, 'r') as f:
- src = f.read()
-
- # Parse the code and insert method call fixes.
- tree = cst.parse_module(src)
- updated = tree.visit(transformer)
-
- # Create the path and directory structure for the new file.
- updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
- updated_path.parent.mkdir(parents=True, exist_ok=True)
-
- # Generate the updated source file at the corresponding path.
- with open(updated_path, 'w') as f:
- f.write(updated.code)
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(
- description="""Fix up source that uses the bigtable client library.
-
-The existing sources are NOT overwritten but are copied to output_dir with changes made.
-
-Note: This tool operates at a best-effort level at converting positional
- parameters in client method calls to keyword based parameters.
- Cases where it WILL FAIL include
- A) * or ** expansion in a method call.
- B) Calls via function or method alias (includes free function calls)
- C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
-
- These all constitute false negatives. The tool will also detect false
- positives when an API method shares a name with another method.
-""")
- parser.add_argument(
- '-d',
- '--input-directory',
- required=True,
- dest='input_dir',
- help='the input directory to walk for python files to fix up',
- )
- parser.add_argument(
- '-o',
- '--output-directory',
- required=True,
- dest='output_dir',
- help='the directory to output files fixed via un-flattening',
- )
- args = parser.parse_args()
- input_dir = pathlib.Path(args.input_dir)
- output_dir = pathlib.Path(args.output_dir)
- if not input_dir.is_dir():
- print(
- f"input directory '{input_dir}' does not exist or is not a directory",
- file=sys.stderr,
- )
- sys.exit(-1)
-
- if not output_dir.is_dir():
- print(
- f"output directory '{output_dir}' does not exist or is not a directory",
- file=sys.stderr,
- )
- sys.exit(-1)
-
- if os.listdir(output_dir):
- print(
- f"output directory '{output_dir}' is not empty",
- file=sys.stderr,
- )
- sys.exit(-1)
-
- fix_files(input_dir, output_dir)
diff --git a/setup.py b/setup.py
index 7e89af11b..c8f13c372 100644
--- a/setup.py
+++ b/setup.py
@@ -12,6 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# DO NOT EDIT THIS FILE OUTSIDE OF `.librarian/generator-input`
+# The source of truth for this file is `.librarian/generator-input`
+
+
import io
import os
@@ -37,9 +41,9 @@
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 5 - Production/Stable"
dependencies = [
- "google-api-core[grpc] >= 2.16.0, <3.0.0",
+ "google-api-core[grpc] >= 2.17.0, <3.0.0",
"google-cloud-core >= 1.4.4, <3.0.0",
- "google-auth >= 2.14.1, <3.0.0,!=2.24.0,!=2.25.0",
+ "google-auth >= 2.23.0, <3.0.0,!=2.24.0,!=2.25.0",
"grpc-google-iam-v1 >= 0.12.4, <1.0.0",
"proto-plus >= 1.22.3, <2.0.0",
"proto-plus >= 1.25.0, <2.0.0; python_version>='3.13'",
@@ -85,6 +89,8 @@
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
"Operating System :: OS Independent",
"Topic :: Internet",
],
@@ -92,10 +98,6 @@
packages=packages,
install_requires=dependencies,
extras_require=extras,
- scripts=[
- "scripts/fixup_bigtable_v2_keywords.py",
- "scripts/fixup_bigtable_admin_v2_keywords.py",
- ],
python_requires=">=3.7",
include_package_data=True,
zip_safe=False,
diff --git a/test_proxy/handlers/client_handler_data_async.py b/test_proxy/handlers/client_handler_data_async.py
index 49539c1aa..246b7fcd7 100644
--- a/test_proxy/handlers/client_handler_data_async.py
+++ b/test_proxy/handlers/client_handler_data_async.py
@@ -19,6 +19,7 @@
from google.cloud.environment_vars import BIGTABLE_EMULATOR
from google.cloud.bigtable.data import BigtableDataClientAsync
from google.cloud.bigtable.data._cross_sync import CrossSync
+from helpers import sql_encoding_helpers
if not CrossSync.is_async:
from client_handler_data_async import error_safe
@@ -32,6 +33,7 @@ def error_safe(func):
Catch and pass errors back to the grpc_server_process
Also check if client is closed before processing requests
"""
+
async def wrapper(self, *args, **kwargs):
try:
if self.closed:
@@ -50,6 +52,7 @@ def encode_exception(exc):
Encode an exception or chain of exceptions to pass back to grpc_handler
"""
from google.api_core.exceptions import GoogleAPICallError
+
error_msg = f"{type(exc).__name__}: {exc}"
result = {"error": error_msg}
if exc.__cause__:
@@ -113,7 +116,9 @@ async def ReadRows(self, request, **kwargs):
table_id = request.pop("table_name").split("/")[-1]
app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
table = self.client.get_table(self.instance_id, table_id, app_profile_id)
- kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ kwargs["operation_timeout"] = (
+ kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ )
result_list = CrossSync.rm_aio(await table.read_rows(request, **kwargs))
# pack results back into protobuf-parsable format
serialized_response = [row._to_dict() for row in result_list]
@@ -124,7 +129,9 @@ async def ReadRow(self, row_key, **kwargs):
table_id = kwargs.pop("table_name").split("/")[-1]
app_profile_id = self.app_profile_id or kwargs.get("app_profile_id", None)
table = self.client.get_table(self.instance_id, table_id, app_profile_id)
- kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ kwargs["operation_timeout"] = (
+ kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ )
result_row = CrossSync.rm_aio(await table.read_row(row_key, **kwargs))
# pack results back into protobuf-parsable format
if result_row:
@@ -135,10 +142,13 @@ async def ReadRow(self, row_key, **kwargs):
@error_safe
async def MutateRow(self, request, **kwargs):
from google.cloud.bigtable.data.mutations import Mutation
+
table_id = request["table_name"].split("/")[-1]
app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
table = self.client.get_table(self.instance_id, table_id, app_profile_id)
- kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ kwargs["operation_timeout"] = (
+ kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ )
row_key = request["row_key"]
mutations = [Mutation._from_dict(d) for d in request["mutations"]]
CrossSync.rm_aio(await table.mutate_row(row_key, mutations, **kwargs))
@@ -147,21 +157,29 @@ async def MutateRow(self, request, **kwargs):
@error_safe
async def BulkMutateRows(self, request, **kwargs):
from google.cloud.bigtable.data.mutations import RowMutationEntry
+
table_id = request["table_name"].split("/")[-1]
app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
table = self.client.get_table(self.instance_id, table_id, app_profile_id)
- kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
- entry_list = [RowMutationEntry._from_dict(entry) for entry in request["entries"]]
+ kwargs["operation_timeout"] = (
+ kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ )
+ entry_list = [
+ RowMutationEntry._from_dict(entry) for entry in request["entries"]
+ ]
CrossSync.rm_aio(await table.bulk_mutate_rows(entry_list, **kwargs))
return "OK"
@error_safe
async def CheckAndMutateRow(self, request, **kwargs):
from google.cloud.bigtable.data.mutations import Mutation, SetCell
+
table_id = request["table_name"].split("/")[-1]
app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
table = self.client.get_table(self.instance_id, table_id, app_profile_id)
- kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ kwargs["operation_timeout"] = (
+ kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ )
row_key = request["row_key"]
# add default values for incomplete dicts, so they can still be parsed to objects
true_mutations = []
@@ -180,33 +198,44 @@ async def CheckAndMutateRow(self, request, **kwargs):
# invalid mutation type. Conformance test may be sending generic empty request
false_mutations.append(SetCell("", "", "", 0))
predicate_filter = request.get("predicate_filter", None)
- result = CrossSync.rm_aio(await table.check_and_mutate_row(
- row_key,
- predicate_filter,
- true_case_mutations=true_mutations,
- false_case_mutations=false_mutations,
- **kwargs,
- ))
+ result = CrossSync.rm_aio(
+ await table.check_and_mutate_row(
+ row_key,
+ predicate_filter,
+ true_case_mutations=true_mutations,
+ false_case_mutations=false_mutations,
+ **kwargs,
+ )
+ )
return result
@error_safe
async def ReadModifyWriteRow(self, request, **kwargs):
from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule
from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule
+
table_id = request["table_name"].split("/")[-1]
app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
table = self.client.get_table(self.instance_id, table_id, app_profile_id)
- kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ kwargs["operation_timeout"] = (
+ kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ )
row_key = request["row_key"]
rules = []
for rule_dict in request.get("rules", []):
qualifier = rule_dict["column_qualifier"]
if "append_value" in rule_dict:
- new_rule = AppendValueRule(rule_dict["family_name"], qualifier, rule_dict["append_value"])
+ new_rule = AppendValueRule(
+ rule_dict["family_name"], qualifier, rule_dict["append_value"]
+ )
else:
- new_rule = IncrementRule(rule_dict["family_name"], qualifier, rule_dict["increment_amount"])
+ new_rule = IncrementRule(
+ rule_dict["family_name"], qualifier, rule_dict["increment_amount"]
+ )
rules.append(new_rule)
- result = CrossSync.rm_aio(await table.read_modify_write_row(row_key, rules, **kwargs))
+ result = CrossSync.rm_aio(
+ await table.read_modify_write_row(row_key, rules, **kwargs)
+ )
# pack results back into protobuf-parsable format
if result:
return result._to_dict()
@@ -218,6 +247,55 @@ async def SampleRowKeys(self, request, **kwargs):
table_id = request["table_name"].split("/")[-1]
app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
table = self.client.get_table(self.instance_id, table_id, app_profile_id)
- kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ kwargs["operation_timeout"] = (
+ kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ )
result = CrossSync.rm_aio(await table.sample_row_keys(**kwargs))
return result
+
+ @error_safe
+ async def ExecuteQuery(self, request, **kwargs):
+ app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+ query = request.get("query")
+ params = request.get("params") or {}
+ # Note that the request has been converted to json, and the code for this converts
+ # query param names to snake case. convert_params reverses this conversion. For this
+ # reason, snake case params will have issues if they're used in the conformance tests.
+ formatted_params, parameter_types = sql_encoding_helpers.convert_params(params)
+ operation_timeout = (
+ kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ )
+ result = CrossSync.rm_aio(
+ await self.client.execute_query(
+ query,
+ self.instance_id,
+ parameters=formatted_params,
+ parameter_types=parameter_types,
+ app_profile_id=app_profile_id,
+ operation_timeout=operation_timeout,
+ prepare_operation_timeout=operation_timeout,
+ )
+ )
+ rows = CrossSync.rm_aio([r async for r in result])
+ md = result.metadata
+ proto_rows = []
+ for r in rows:
+ vals = []
+ for c in md.columns:
+ vals.append(sql_encoding_helpers.convert_value(c.column_type, r[c.column_name]))
+
+ proto_rows.append({"values": vals})
+
+ proto_columns = []
+ for c in md.columns:
+ proto_columns.append(
+ {
+ "name": c.column_name,
+ "type": sql_encoding_helpers.convert_type(c.column_type),
+ }
+ )
+
+ return {
+ "metadata": {"columns": proto_columns},
+ "rows": proto_rows,
+ }
diff --git a/test_proxy/handlers/client_handler_data_sync_autogen.py b/test_proxy/handlers/client_handler_data_sync_autogen.py
index eabae0ffa..0e557f058 100644
--- a/test_proxy/handlers/client_handler_data_sync_autogen.py
+++ b/test_proxy/handlers/client_handler_data_sync_autogen.py
@@ -20,6 +20,7 @@
import os
from google.cloud.environment_vars import BIGTABLE_EMULATOR
from google.cloud.bigtable.data._cross_sync import CrossSync
+from helpers import sql_encoding_helpers
from client_handler_data_async import error_safe
@@ -183,3 +184,43 @@ async def SampleRowKeys(self, request, **kwargs):
)
result = table.sample_row_keys(**kwargs)
return result
+
+ @error_safe
+ async def ExecuteQuery(self, request, **kwargs):
+ app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+ query = request.get("query")
+ params = request.get("params") or {}
+ (formatted_params, parameter_types) = sql_encoding_helpers.convert_params(
+ params
+ )
+ operation_timeout = (
+ kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+ )
+ result = self.client.execute_query(
+ query,
+ self.instance_id,
+ parameters=formatted_params,
+ parameter_types=parameter_types,
+ app_profile_id=app_profile_id,
+ operation_timeout=operation_timeout,
+ prepare_operation_timeout=operation_timeout,
+ )
+ rows = [r for r in result]
+ md = result.metadata
+ proto_rows = []
+ for r in rows:
+ vals = []
+ for c in md.columns:
+ vals.append(
+ sql_encoding_helpers.convert_value(c.column_type, r[c.column_name])
+ )
+ proto_rows.append({"values": vals})
+ proto_columns = []
+ for c in md.columns:
+ proto_columns.append(
+ {
+ "name": c.column_name,
+ "type": sql_encoding_helpers.convert_type(c.column_type),
+ }
+ )
+ return {"metadata": {"columns": proto_columns}, "rows": proto_rows}
diff --git a/test_proxy/handlers/grpc_handler.py b/test_proxy/handlers/grpc_handler.py
index 2c70778dd..28ae19cf9 100644
--- a/test_proxy/handlers/grpc_handler.py
+++ b/test_proxy/handlers/grpc_handler.py
@@ -1,4 +1,3 @@
-
import time
import test_proxy_pb2
@@ -9,6 +8,17 @@
from google.protobuf import json_format
+def correct_cancelled(status):
+ """
+ Deadline-exceeded errors are a race between client-side cancellation and the
+ server-side deadline. For the purposes of these tests the client never cancels,
+ so we adjust cancelled errors to deadline_exceeded for consistency.
+ """
+ if status.code == 1:
+ return Status(code=4, message="deadline exceeded")
+ return status
+
+
class TestProxyGrpcServer(test_proxy_pb2_grpc.CloudBigtableV2TestProxyServicer):
"""
Implements a grpc server that proxies conformance test requests to the client library
@@ -59,7 +69,6 @@ def wrapper(self, request, context, **kwargs):
return wrapper
-
@delegate_to_client_handler
def CreateClient(self, request, context, client_response=None):
return test_proxy_pb2.CreateClientResponse()
@@ -77,10 +86,10 @@ def ReadRows(self, request, context, client_response=None):
status = Status()
rows = []
if isinstance(client_response, dict) and "error" in client_response:
- status = Status(code=5, message=client_response["error"])
+ status = correct_cancelled(Status(code=5, message=client_response["error"]))
else:
rows = [data_pb2.Row(**d) for d in client_response]
- result = test_proxy_pb2.RowsResult(row=rows, status=status)
+ result = test_proxy_pb2.RowsResult(rows=rows, status=status)
return result
@delegate_to_client_handler
@@ -88,7 +97,12 @@ def ReadRow(self, request, context, client_response=None):
status = Status()
row = None
if isinstance(client_response, dict) and "error" in client_response:
- status=Status(code=client_response.get("code", 5), message=client_response.get("error"))
+ status = correct_cancelled(
+ Status(
+ code=client_response.get("code", 5),
+ message=client_response.get("error"),
+ )
+ )
elif client_response != "None":
row = data_pb2.Row(**client_response)
result = test_proxy_pb2.RowResult(row=row, status=status)
@@ -98,7 +112,12 @@ def ReadRow(self, request, context, client_response=None):
def MutateRow(self, request, context, client_response=None):
status = Status()
if isinstance(client_response, dict) and "error" in client_response:
- status = Status(code=client_response.get("code", 5), message=client_response["error"])
+ status = correct_cancelled(
+ Status(
+ code=client_response.get("code", 5),
+ message=client_response["error"],
+ )
+ )
return test_proxy_pb2.MutateRowResult(status=status)
@delegate_to_client_handler
@@ -106,22 +125,39 @@ def BulkMutateRows(self, request, context, client_response=None):
status = Status()
entries = []
if isinstance(client_response, dict) and "error" in client_response:
- entries = [bigtable_pb2.MutateRowsResponse.Entry(index=exc_dict.get("index",1), status=Status(code=exc_dict.get("code", 5))) for exc_dict in client_response.get("subexceptions", [])]
- if not entries:
- # only return failure on the overall request if there are failed entries
- status = Status(code=client_response.get("code", 5), message=client_response["error"])
- # TODO: protos were updated. entry is now entries: https://github.com/googleapis/cndb-client-testing-protos/commit/e6205a2bba04acc10d12421a1402870b4a525fb3
- response = test_proxy_pb2.MutateRowsResult(status=status, entry=entries)
+ entries = [
+ bigtable_pb2.MutateRowsResponse.Entry(
+ index=exc_dict.get("index", 1),
+ status=correct_cancelled(Status(code=exc_dict.get("code", 5))),
+ )
+ for exc_dict in client_response.get("subexceptions", [])
+ ]
+ status = correct_cancelled(
+ Status(
+ code=client_response.get("code", 5),
+ message=client_response["error"],
+ )
+ )
+ response = test_proxy_pb2.MutateRowsResult(status=status, entries=entries)
return response
@delegate_to_client_handler
def CheckAndMutateRow(self, request, context, client_response=None):
if isinstance(client_response, dict) and "error" in client_response:
- status = Status(code=client_response.get("code", 5), message=client_response["error"])
+ status = correct_cancelled(
+ Status(
+ code=client_response.get("code", 5),
+ message=client_response["error"],
+ )
+ )
response = test_proxy_pb2.CheckAndMutateRowResult(status=status)
else:
- result = bigtable_pb2.CheckAndMutateRowResponse(predicate_matched=client_response)
- response = test_proxy_pb2.CheckAndMutateRowResult(result=result, status=Status())
+ result = bigtable_pb2.CheckAndMutateRowResponse(
+ predicate_matched=client_response
+ )
+ response = test_proxy_pb2.CheckAndMutateRowResult(
+ result=result, status=Status()
+ )
return response
@delegate_to_client_handler
@@ -129,7 +165,12 @@ def ReadModifyWriteRow(self, request, context, client_response=None):
status = Status()
row = None
if isinstance(client_response, dict) and "error" in client_response:
- status = Status(code=client_response.get("code", 5), message=client_response.get("error"))
+ status = correct_cancelled(
+ Status(
+ code=client_response.get("code", 5),
+ message=client_response.get("error"),
+ )
+ )
elif client_response != "None":
row = data_pb2.Row(**client_response)
result = test_proxy_pb2.RowResult(row=row, status=status)
@@ -140,9 +181,30 @@ def SampleRowKeys(self, request, context, client_response=None):
status = Status()
sample_list = []
if isinstance(client_response, dict) and "error" in client_response:
- status = Status(code=client_response.get("code", 5), message=client_response.get("error"))
+ status = correct_cancelled(
+ Status(
+ code=client_response.get("code", 5),
+ message=client_response.get("error"),
+ )
+ )
else:
for sample in client_response:
- sample_list.append(bigtable_pb2.SampleRowKeysResponse(offset_bytes=sample[1], row_key=sample[0]))
- # TODO: protos were updated. sample is now samples: https://github.com/googleapis/cndb-client-testing-protos/commit/e6205a2bba04acc10d12421a1402870b4a525fb3
- return test_proxy_pb2.SampleRowKeysResult(status=status, sample=sample_list)
+ sample_list.append(
+ bigtable_pb2.SampleRowKeysResponse(
+ offset_bytes=sample[1], row_key=sample[0]
+ )
+ )
+ return test_proxy_pb2.SampleRowKeysResult(status=status, samples=sample_list)
+
+ @delegate_to_client_handler
+ def ExecuteQuery(self, request, context, client_response=None):
+ if isinstance(client_response, dict) and "error" in client_response:
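+ # an error dict defaults to gRPC code 13 (INTERNAL) when the client handler did not set one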
+ return test_proxy_pb2.ExecuteQueryResult(
+ status=correct_cancelled(
+ Status(code=client_response.get("code", 13), message=client_response["error"])
+ )
+ )
+ else:
+ return test_proxy_pb2.ExecuteQueryResult(
+ metadata=client_response["metadata"], rows=client_response["rows"]
+ )
diff --git a/test_proxy/handlers/helpers/sql_encoding_helpers.py b/test_proxy/handlers/helpers/sql_encoding_helpers.py
new file mode 100644
index 000000000..9640ae3fd
--- /dev/null
+++ b/test_proxy/handlers/helpers/sql_encoding_helpers.py
@@ -0,0 +1,183 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This module contains helpers for handling sql data types for the test proxy.
+"""
+from datetime import date
+from typing import Any
+
+from google.api_core.datetime_helpers import DatetimeWithNanoseconds
+from google.cloud.bigtable.data.execute_query.metadata import SqlType
+
+
+PRIMITIVE_TYPE_MAPPING = {
+ "bytes_type": SqlType.Bytes(),
+ "string_type": SqlType.String(),
+ "int64_type": SqlType.Int64(),
+ "float32_type": SqlType.Float32(),
+ "float64_type": SqlType.Float64(),
+ "bool_type": SqlType.Bool(),
+ "timestamp_type": SqlType.Timestamp(),
+ "date_type": SqlType.Date(),
+}
+
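+# Value fields whose dict form already holds the native python value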
+PRIMITIVE_VALUE_FIELDS = [
+ "bytes_value",
+ "string_value",
+ "int_value",
+ "float_value",
+ "bool_value",
+]
+
+
+def snake_to_camel(snake_string):
+ """
+ Used to convert query parameter names back to camel case. This needs to be handled
+ specifically because the python test proxy converts all keys to snake case when it
+ converts proto messages to dicts.
+ """
+ components = snake_string.split("_")
+ return components[0] + "".join(x.title() for x in components[1:])
+
+
+def convert_value(type: SqlType, val: Any):
+ """
+ Converts python value to a dict representation of a protobuf Value message.
+ """
+ if val is None:
+ return {}
+ elif isinstance(type, SqlType.Date):
+ return {"date_value": val}
+ elif isinstance(type, SqlType.Map):
+ key_type = type.key_type
+ val_type = type.value_type
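+ # each map entry is encoded as a two-element array_value holding [key, value]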
+ results = []
+ for k, v in val.items():
+ results.append(
+ {
+ "array_value": {
+ "values": [
+ convert_value(key_type, k),
+ convert_value(val_type, v),
+ ]
+ }
+ }
+ )
+ return {"array_value": {"values": results}}
+ elif isinstance(type, SqlType.Struct):
+ results = []
+ for i, (_, field_val) in enumerate(val.fields):
+ results.append(convert_value(type[i], field_val))
+ return {"array_value": {"values": results}}
+ elif isinstance(type, SqlType.Array):
+ elem_type = type.element_type
+ results = []
+ for e in val:
+ results.append(convert_value(elem_type, e))
+ return {"array_value": {"values": results}}
+ else:
+ return type._to_value_pb_dict(val)
+
+
+def convert_type(type: SqlType):
+ if isinstance(type, SqlType.Map):
+ return {
+ "map_type": {
+ "key_type": convert_type(type.key_type),
+ "value_type": convert_type(type.value_type),
+ }
+ }
+ elif isinstance(type, SqlType.Struct):
+ fields = []
+ for field_name, field_type in type.fields:
+ fields.append({"field_name": field_name, "type": convert_type(field_type)})
+ return {"struct_type": {"fields": fields}}
+ elif isinstance(type, SqlType.Array):
+ return {"array_type": {"element_type": convert_type(type.element_type)}}
+ else:
+ return type._to_type_pb_dict()
+
+
+def to_sql_type(proto_type_dict):
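+ # the dict form of a Type proto is expected to set exactly one field (e.g. "int64_type" or "array_type")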
+ if len(proto_type_dict.keys()) != 1:
+ raise ValueError("Invalid type: ", proto_type_dict)
+ type_field = list(proto_type_dict.keys())[0]
+ if type_field in PRIMITIVE_TYPE_MAPPING:
+ return PRIMITIVE_TYPE_MAPPING[type_field]
+ elif type_field == "array_type":
+ elem_type_dict = proto_type_dict["array_type"]["element_type"]
+ return SqlType.Array(to_sql_type(elem_type_dict))
+ else:
+ raise ValueError("Invalid query parameter type: ", proto_type_dict)
+
+
+def convert_to_python_value(proto_val: Any, sql_type: SqlType):
+ """
+ Converts the given dict representation of a proto Value message to the correct
+ python value. This is used to convert query params to the representation expected
+ from users. We can't reuse existing parsers because they expect actual proto messages
+ rather than dicts.
+ """
+ value_field = sql_type.value_pb_dict_field_name
+ if isinstance(sql_type, SqlType.Array):
+ if "array_value" not in proto_val:
+ return None
+ elem_type = sql_type.element_type
+ return [
+ convert_to_python_value(v, elem_type)
+ for v in proto_val["array_value"]["values"]
+ ]
+ if value_field and value_field not in proto_val:
+ return None
+ if value_field in PRIMITIVE_VALUE_FIELDS:
+ return proto_val[value_field]
+ if isinstance(sql_type, SqlType.Timestamp):
+ if "timestamp_value" not in proto_val:
+ return None
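+ # timestamp_value arrives as an RFC 3339 string in the json/dict representation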
+ return DatetimeWithNanoseconds.from_rfc3339(proto_val["timestamp_value"])
+ if isinstance(sql_type, SqlType.Date):
+ if "date_value" not in proto_val:
+ return None
+ return date(
+ year=proto_val["date_value"]["year"],
+ month=proto_val["date_value"]["month"],
+ day=proto_val["date_value"]["day"],
+ )
+ raise ValueError("Unexpected parameter: %s, %s", proto_val, sql_type)
+
+
+def convert_params(request_params):
+ """
+ Converts the given dictionary of parameters to a python representation.
+ This converts parameter names from snake to camel case and protobuf Value dicts
+ to python values.
+ """
+ python_params = {}
+ param_types = {}
+ for param_key, param_value in request_params.items():
+ if "type" not in param_value:
+ raise ValueError("type must be set for query params")
+
+ sql_type = to_sql_type(param_value["type"])
+ readjusted_param_name = snake_to_camel(param_key)
+ param_types[readjusted_param_name] = sql_type
+ if len(param_value.keys()) == 1:
+ # this means type is set and nothing else
+ python_params[readjusted_param_name] = None
+ elif len(param_value) > 2:
+ raise ValueError("Unexpected Value format: ", param_value)
+ python_params[readjusted_param_name] = convert_to_python_value(
+ param_value, sql_type
+ )
+ return python_params, param_types
diff --git a/test_proxy/protos/bigtable_pb2.py b/test_proxy/protos/bigtable_pb2.py
index 936a4ed55..edc90c3ec 100644
--- a/test_proxy/protos/bigtable_pb2.py
+++ b/test_proxy/protos/bigtable_pb2.py
@@ -1,11 +1,22 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
# source: google/bigtable/v2/bigtable.proto
+# Protobuf Python Version: 5.29.0
"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'google/bigtable/v2/bigtable.proto'
+)
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
@@ -18,128 +29,187 @@
from google.api import routing_pb2 as google_dot_api_dot_routing__pb2
import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2
import request_stats_pb2 as google_dot_bigtable_dot_v2_dot_request__stats__pb2
+import types_pb2 as google_dot_bigtable_dot_v2_dot_types__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x18google/api/routing.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a&google/bigtable/v2/request_stats.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\x90\x03\n\x0fReadRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\x12P\n\x12request_stats_view\x18\x06 \x01(\x0e\x32\x34.google.bigtable.v2.ReadRowsRequest.RequestStatsView\"f\n\x10RequestStatsView\x12\"\n\x1eREQUEST_STATS_VIEW_UNSPECIFIED\x10\x00\x12\x16\n\x12REQUEST_STATS_NONE\x10\x01\x12\x16\n\x12REQUEST_STATS_FULL\x10\x02\"\xb1\x03\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x12\x37\n\rrequest_stats\x18\x03 \x01(\x0b\x32 .google.bigtable.v2.RequestStats\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"n\n\x14SampleRowKeysRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"\xb6\x01\n\x10MutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x13\n\x11MutateRowResponse\"\xfe\x01\n\x11MutateRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xae\x02\n\x18\x43heckAndMutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 
\x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"i\n\x12PingAndWarmRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"\x15\n\x13PingAndWarmResponse\"\xc6\x01\n\x19ReadModifyWriteRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"\x86\x01\n,GenerateInitialChangeStreamPartitionsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"g\n-GenerateInitialChangeStreamPartitionsResponse\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\"\x9b\x03\n\x17ReadChangeStreamRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x36\n\tpartition\x18\x03 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\x30\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12K\n\x13\x63ontinuation_tokens\x18\x06 \x01(\x0b\x32,.google.bigtable.v2.StreamContinuationTokensH\x00\x12,\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x35\n\x12heartbeat_duration\x18\x07 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0c\n\nstart_from\"\xeb\t\n\x18ReadChangeStreamResponse\x12N\n\x0b\x64\x61ta_change\x18\x01 \x01(\x0b\x32\x37.google.bigtable.v2.ReadChangeStreamResponse.DataChangeH\x00\x12K\n\theartbeat\x18\x02 \x01(\x0b\x32\x36.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatH\x00\x12P\n\x0c\x63lose_stream\x18\x03 \x01(\x0b\x32\x38.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamH\x00\x1a\xf4\x01\n\rMutationChunk\x12X\n\nchunk_info\x18\x01 \x01(\x0b\x32\x44.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo\x12.\n\x08mutation\x18\x02 \x01(\x0b\x32\x1c.google.bigtable.v2.Mutation\x1aY\n\tChunkInfo\x12\x1a\n\x12\x63hunked_value_size\x18\x01 \x01(\x05\x12\x1c\n\x14\x63hunked_value_offset\x18\x02 \x01(\x05\x12\x12\n\nlast_chunk\x18\x03 \x01(\x08\x1a\xc6\x03\n\nDataChange\x12J\n\x04type\x18\x01 \x01(\x0e\x32<.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type\x12\x19\n\x11source_cluster_id\x18\x02 \x01(\t\x12\x0f\n\x07row_key\x18\x03 \x01(\x0c\x12\x34\n\x10\x63ommit_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\ntiebreaker\x18\x05 \x01(\x05\x12J\n\x06\x63hunks\x18\x06 \x03(\x0b\x32:.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk\x12\x0c\n\x04\x64one\x18\x08 \x01(\x08\x12\r\n\x05token\x18\t \x01(\t\x12;\n\x17\x65stimated_low_watermark\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"P\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04USER\x10\x01\x12\x16\n\x12GARBAGE_COLLECTION\x10\x02\x12\x10\n\x0c\x43ONTINUATION\x10\x03\x1a\x91\x01\n\tHeartbeat\x12G\n\x12\x63ontinuation_token\x18\x01 
\x01(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x17\x65stimated_low_watermark\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a{\n\x0b\x43loseStream\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12H\n\x13\x63ontinuation_tokens\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationTokenB\x0f\n\rstream_record2\xd7\x18\n\x08\x42igtable\x12\x9b\x02\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"\xc1\x01\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xac\x02\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"\xc3\x01\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xc1\x02\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"\xe6\x01\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xb3\x02\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"\xd3\x01\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xad\x03\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"\xba\x02\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\xee\x01\n\x0bPingAndWarm\x12&.google.bigtable.v2.PingAndWarmRequest\x1a\'.google.bigtable.v2.PingAndWarmResponse\"\x8d\x01\x82\xd3\xe4\x93\x02+\"&/v2/{name=projects/*/instances/*}:ping:\x01*\x8a\xd3\xe4\x93\x02\x39\x12%\n\x04name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x04name\xda\x41\x13name,app_profile_id\x12\xdd\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"\xe7\x01\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x12\xbb\x02\n%GenerateInitialChangeStreamPartitions\x12@.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest\x1a\x41.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse\"\x8a\x01\x82\xd3\xe4\x93\x02[\"V/v2/{ta
ble_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xe6\x01\n\x10ReadChangeStream\x12+.google.bigtable.v2.ReadChangeStreamRequest\x1a,.google.bigtable.v2.ReadChangeStreamResponse\"u\x82\xd3\xe4\x93\x02\x46\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xeb\x02\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41P\n%bigtableadmin.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}\xea\x41\\\n\"bigtableadmin.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.bigtable_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x18google/api/routing.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a&google/bigtable/v2/request_stats.proto\x1a\x1egoogle/bigtable/v2/types.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\xcc\x04\n\x0fReadRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\t \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12U\n\x16materialized_view_name\x18\x0b \x01(\tB5\xe0\x41\x01\xfa\x41/\n-bigtableadmin.googleapis.com/MaterializedView\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\x12P\n\x12request_stats_view\x18\x06 \x01(\x0e\x32\x34.google.bigtable.v2.ReadRowsRequest.RequestStatsView\x12\x10\n\x08reversed\x18\x07 \x01(\x08\"f\n\x10RequestStatsView\x12\"\n\x1eREQUEST_STATS_VIEW_UNSPECIFIED\x10\x00\x12\x16\n\x12REQUEST_STATS_NONE\x10\x01\x12\x16\n\x12REQUEST_STATS_FULL\x10\x02\"\xb1\x03\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x12\x37\n\rrequest_stats\x18\x03 \x01(\x0b\x32 .google.bigtable.v2.RequestStats\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"\x98\x02\n\x14SampleRowKeysRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x04 \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12U\n\x16materialized_view_name\x18\x05 \x01(\tB5\xe0\x41\x01\xfa\x41/\n-bigtableadmin.googleapis.com/MaterializedView\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"\x89\x02\n\x10MutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x06 \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x13\n\x11MutateRowResponse\"\xd1\x02\n\x11MutateRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x05 \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 
\x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\xe4\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x12?\n\x0frate_limit_info\x18\x03 \x01(\x0b\x32!.google.bigtable.v2.RateLimitInfoH\x00\x88\x01\x01\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.StatusB\x12\n\x10_rate_limit_info\"J\n\rRateLimitInfo\x12)\n\x06period\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0e\n\x06\x66\x61\x63tor\x18\x02 \x01(\x01\"\x81\x03\n\x18\x43heckAndMutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\t \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"i\n\x12PingAndWarmRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"\x15\n\x13PingAndWarmResponse\"\x99\x02\n\x19ReadModifyWriteRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x06 \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"\x86\x01\n,GenerateInitialChangeStreamPartitionsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"g\n-GenerateInitialChangeStreamPartitionsResponse\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\"\x9b\x03\n\x17ReadChangeStreamRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x36\n\tpartition\x18\x03 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\x30\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12K\n\x13\x63ontinuation_tokens\x18\x06 \x01(\x0b\x32,.google.bigtable.v2.StreamContinuationTokensH\x00\x12,\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x35\n\x12heartbeat_duration\x18\x07 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0c\n\nstart_from\"\xa9\n\n\x18ReadChangeStreamResponse\x12N\n\x0b\x64\x61ta_change\x18\x01 \x01(\x0b\x32\x37.google.bigtable.v2.ReadChangeStreamResponse.DataChangeH\x00\x12K\n\theartbeat\x18\x02 \x01(\x0b\x32\x36.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatH\x00\x12P\n\x0c\x63lose_stream\x18\x03 
\x01(\x0b\x32\x38.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamH\x00\x1a\xf4\x01\n\rMutationChunk\x12X\n\nchunk_info\x18\x01 \x01(\x0b\x32\x44.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo\x12.\n\x08mutation\x18\x02 \x01(\x0b\x32\x1c.google.bigtable.v2.Mutation\x1aY\n\tChunkInfo\x12\x1a\n\x12\x63hunked_value_size\x18\x01 \x01(\x05\x12\x1c\n\x14\x63hunked_value_offset\x18\x02 \x01(\x05\x12\x12\n\nlast_chunk\x18\x03 \x01(\x08\x1a\xc6\x03\n\nDataChange\x12J\n\x04type\x18\x01 \x01(\x0e\x32<.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type\x12\x19\n\x11source_cluster_id\x18\x02 \x01(\t\x12\x0f\n\x07row_key\x18\x03 \x01(\x0c\x12\x34\n\x10\x63ommit_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\ntiebreaker\x18\x05 \x01(\x05\x12J\n\x06\x63hunks\x18\x06 \x03(\x0b\x32:.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk\x12\x0c\n\x04\x64one\x18\x08 \x01(\x08\x12\r\n\x05token\x18\t \x01(\t\x12;\n\x17\x65stimated_low_watermark\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"P\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04USER\x10\x01\x12\x16\n\x12GARBAGE_COLLECTION\x10\x02\x12\x10\n\x0c\x43ONTINUATION\x10\x03\x1a\x91\x01\n\tHeartbeat\x12G\n\x12\x63ontinuation_token\x18\x01 \x01(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x17\x65stimated_low_watermark\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\xb8\x01\n\x0b\x43loseStream\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12H\n\x13\x63ontinuation_tokens\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x0enew_partitions\x18\x03 \x03(\x0b\x32#.google.bigtable.v2.StreamPartitionB\x0f\n\rstream_record\"\xa1\x03\n\x13\x45xecuteQueryRequest\x12\x44\n\rinstance_name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x14\n\x05query\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x16\n\x0eprepared_query\x18\t \x01(\x0c\x12;\n\x0cproto_format\x18\x04 \x01(\x0b\x32\x1f.google.bigtable.v2.ProtoFormatB\x02\x18\x01H\x00\x12\x19\n\x0cresume_token\x18\x08 \x01(\x0c\x42\x03\xe0\x41\x01\x12H\n\x06params\x18\x07 \x03(\x0b\x32\x33.google.bigtable.v2.ExecuteQueryRequest.ParamsEntryB\x03\xe0\x41\x02\x1aH\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.google.bigtable.v2.Value:\x02\x38\x01\x42\r\n\x0b\x64\x61ta_format\"\x96\x01\n\x14\x45xecuteQueryResponse\x12\x39\n\x08metadata\x18\x01 \x01(\x0b\x32%.google.bigtable.v2.ResultSetMetadataH\x00\x12\x37\n\x07results\x18\x02 \x01(\x0b\x32$.google.bigtable.v2.PartialResultSetH\x00\x42\n\n\x08response\"\xf4\x02\n\x13PrepareQueryRequest\x12\x44\n\rinstance_name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x12\n\x05query\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x0cproto_format\x18\x04 \x01(\x0b\x32\x1f.google.bigtable.v2.ProtoFormatH\x00\x12Q\n\x0bparam_types\x18\x06 \x03(\x0b\x32\x37.google.bigtable.v2.PrepareQueryRequest.ParamTypesEntryB\x03\xe0\x41\x02\x1aK\n\x0fParamTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.Type:\x02\x38\x01\x42\r\n\x0b\x64\x61ta_format\"\x98\x01\n\x14PrepareQueryResponse\x12\x37\n\x08metadata\x18\x01 \x01(\x0b\x32%.google.bigtable.v2.ResultSetMetadata\x12\x16\n\x0eprepared_query\x18\x02 \x01(\x0c\x12/\n\x0bvalid_until\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xc9&\n\x08\x42igtable\x12\xdb\x03\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"\x81\x03\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02\x9a\x01\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*ZZ\"U/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}0\x01\x12\xee\x03\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"\x85\x03\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02\x9e\x01\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeysZ\\\x12Z/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}0\x01\x12\x82\x04\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"\xa7\x03\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x82\xd3\xe4\x93\x02\x9c\x01\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*Z[\"V/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRow:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}\x12\xf5\x03\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"\x95\x03\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id\x82\xd3\xe4\x93\x02\x9e\x01\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*Z\\\"W/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRows:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}0\x01\x12\xf6\x04\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"\x83\x04\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x82\xd3\xe4\x93\x02\xac\x01\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*Zc\"^/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:checkAndMutateRow:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}\x12\xee\x01\n\x0bPingAndWarm\x12&.google.bigtable.v2.PingAndWarmRequest\x1a\'.google.bigtable.v2.PingAndWarmResponse\"\x8d\x01\xda\x41\x04name\xda\x41\x13name,app_profile_id\x82\xd3\xe4\x93\x02+\"&/v2/{name=projects/*/instance
s/*}:ping:\x01*\x8a\xd3\xe4\x93\x02\x39\x12%\n\x04name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id\x12\xa7\x04\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"\xb1\x03\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x82\xd3\xe4\x93\x02\xae\x01\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*Zd\"_/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readModifyWriteRow:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}\x12\xbb\x02\n%GenerateInitialChangeStreamPartitions\x12@.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest\x1a\x41.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse\"\x8a\x01\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\x01*0\x01\x12\xe6\x01\n\x10ReadChangeStream\x12+.google.bigtable.v2.ReadChangeStreamRequest\x1a,.google.bigtable.v2.ReadChangeStreamResponse\"u\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02\x46\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\x01*0\x01\x12\xa9\x02\n\x0cPrepareQuery\x12\'.google.bigtable.v2.PrepareQueryRequest\x1a(.google.bigtable.v2.PrepareQueryResponse\"\xc5\x01\xda\x41\x13instance_name,query\xda\x41\"instance_name,query,app_profile_id\x82\xd3\xe4\x93\x02<\"7/v2/{instance_name=projects/*/instances/*}:prepareQuery:\x01*\x8a\xd3\xe4\x93\x02\x42\x12.\n\rinstance_name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id\x12\xab\x02\n\x0c\x45xecuteQuery\x12\'.google.bigtable.v2.ExecuteQueryRequest\x1a(.google.bigtable.v2.ExecuteQueryResponse\"\xc5\x01\xda\x41\x13instance_name,query\xda\x41\"instance_name,query,app_profile_id\x82\xd3\xe4\x93\x02<\"7/v2/{instance_name=projects/*/instances/*}:executeQuery:\x01*\x8a\xd3\xe4\x93\x02\x42\x12.\n\rinstance_name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id0\x01\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xf5\x04\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41P\n%bigtableadmin.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}\xea\x41\\\n\"bigtableadmin.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}\xea\x41\x87\x01\n+bigtableadmin.googleapis.com/AuthorizedView\x12Xprojects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}\xea\x41~\n-bigtableadmin.googleapis.com/MaterializedView\x12Mprojects/{project}/instances/{instance}/materializedViews/{materialized_view}b\x06proto3')
- DESCRIPTOR._options = None
- DESCRIPTOR._serialized_options = b'\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AP\n%bigtableadmin.googleapis.com/Instance\022\'projects/{project}/instances/{instance}\352A\\\n\"bigtableadmin.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}'
- _READROWSREQUEST.fields_by_name['table_name']._options = None
- _READROWSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table'
- _SAMPLEROWKEYSREQUEST.fields_by_name['table_name']._options = None
- _SAMPLEROWKEYSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table'
- _MUTATEROWREQUEST.fields_by_name['table_name']._options = None
- _MUTATEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table'
- _MUTATEROWREQUEST.fields_by_name['row_key']._options = None
- _MUTATEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002'
- _MUTATEROWREQUEST.fields_by_name['mutations']._options = None
- _MUTATEROWREQUEST.fields_by_name['mutations']._serialized_options = b'\340A\002'
- _MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations']._options = None
- _MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations']._serialized_options = b'\340A\002'
- _MUTATEROWSREQUEST.fields_by_name['table_name']._options = None
- _MUTATEROWSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table'
- _MUTATEROWSREQUEST.fields_by_name['entries']._options = None
- _MUTATEROWSREQUEST.fields_by_name['entries']._serialized_options = b'\340A\002'
- _CHECKANDMUTATEROWREQUEST.fields_by_name['table_name']._options = None
- _CHECKANDMUTATEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table'
- _CHECKANDMUTATEROWREQUEST.fields_by_name['row_key']._options = None
- _CHECKANDMUTATEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002'
- _PINGANDWARMREQUEST.fields_by_name['name']._options = None
- _PINGANDWARMREQUEST.fields_by_name['name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance'
- _READMODIFYWRITEROWREQUEST.fields_by_name['table_name']._options = None
- _READMODIFYWRITEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table'
- _READMODIFYWRITEROWREQUEST.fields_by_name['row_key']._options = None
- _READMODIFYWRITEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002'
- _READMODIFYWRITEROWREQUEST.fields_by_name['rules']._options = None
- _READMODIFYWRITEROWREQUEST.fields_by_name['rules']._serialized_options = b'\340A\002'
- _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST.fields_by_name['table_name']._options = None
- _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table'
- _READCHANGESTREAMREQUEST.fields_by_name['table_name']._options = None
- _READCHANGESTREAMREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table'
- _BIGTABLE._options = None
- _BIGTABLE._serialized_options = b'\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only'
- _BIGTABLE.methods_by_name['ReadRows']._options = None
- _BIGTABLE.methods_by_name['ReadRows']._serialized_options = b'\202\323\344\223\002>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\ntable_name\332A\031table_name,app_profile_id'
- _BIGTABLE.methods_by_name['SampleRowKeys']._options = None
- _BIGTABLE.methods_by_name['SampleRowKeys']._serialized_options = b'\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\ntable_name\332A\031table_name,app_profile_id'
- _BIGTABLE.methods_by_name['MutateRow']._options = None
- _BIGTABLE.methods_by_name['MutateRow']._serialized_options = b'\202\323\344\223\002?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id'
- _BIGTABLE.methods_by_name['MutateRows']._options = None
- _BIGTABLE.methods_by_name['MutateRows']._serialized_options = b'\202\323\344\223\002@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\022table_name,entries\332A!table_name,entries,app_profile_id'
- _BIGTABLE.methods_by_name['CheckAndMutateRow']._options = None
- _BIGTABLE.methods_by_name['CheckAndMutateRow']._serialized_options = b'\202\323\344\223\002G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id'
- _BIGTABLE.methods_by_name['PingAndWarm']._options = None
- _BIGTABLE.methods_by_name['PingAndWarm']._serialized_options = b'\202\323\344\223\002+\"&/v2/{name=projects/*/instances/*}:ping:\001*\212\323\344\223\0029\022%\n\004name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id\332A\004name\332A\023name,app_profile_id'
- _BIGTABLE.methods_by_name['ReadModifyWriteRow']._options = None
- _BIGTABLE.methods_by_name['ReadModifyWriteRow']._serialized_options = b'\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\030table_name,row_key,rules\332A\'table_name,row_key,rules,app_profile_id'
- _BIGTABLE.methods_by_name['GenerateInitialChangeStreamPartitions']._options = None
- _BIGTABLE.methods_by_name['GenerateInitialChangeStreamPartitions']._serialized_options = b'\202\323\344\223\002[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\001*\332A\ntable_name\332A\031table_name,app_profile_id'
- _BIGTABLE.methods_by_name['ReadChangeStream']._options = None
- _BIGTABLE.methods_by_name['ReadChangeStream']._serialized_options = b'\202\323\344\223\002F\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\001*\332A\ntable_name\332A\031table_name,app_profile_id'
- _READROWSREQUEST._serialized_start=392
- _READROWSREQUEST._serialized_end=792
- _READROWSREQUEST_REQUESTSTATSVIEW._serialized_start=690
- _READROWSREQUEST_REQUESTSTATSVIEW._serialized_end=792
- _READROWSRESPONSE._serialized_start=795
- _READROWSRESPONSE._serialized_end=1228
- _READROWSRESPONSE_CELLCHUNK._serialized_start=967
- _READROWSRESPONSE_CELLCHUNK._serialized_end=1228
- _SAMPLEROWKEYSREQUEST._serialized_start=1230
- _SAMPLEROWKEYSREQUEST._serialized_end=1340
- _SAMPLEROWKEYSRESPONSE._serialized_start=1342
- _SAMPLEROWKEYSRESPONSE._serialized_end=1404
- _MUTATEROWREQUEST._serialized_start=1407
- _MUTATEROWREQUEST._serialized_end=1589
- _MUTATEROWRESPONSE._serialized_start=1591
- _MUTATEROWRESPONSE._serialized_end=1610
- _MUTATEROWSREQUEST._serialized_start=1613
- _MUTATEROWSREQUEST._serialized_end=1867
- _MUTATEROWSREQUEST_ENTRY._serialized_start=1789
- _MUTATEROWSREQUEST_ENTRY._serialized_end=1867
- _MUTATEROWSRESPONSE._serialized_start=1870
- _MUTATEROWSRESPONSE._serialized_end=2013
- _MUTATEROWSRESPONSE_ENTRY._serialized_start=1955
- _MUTATEROWSRESPONSE_ENTRY._serialized_end=2013
- _CHECKANDMUTATEROWREQUEST._serialized_start=2016
- _CHECKANDMUTATEROWREQUEST._serialized_end=2318
- _CHECKANDMUTATEROWRESPONSE._serialized_start=2320
- _CHECKANDMUTATEROWRESPONSE._serialized_end=2374
- _PINGANDWARMREQUEST._serialized_start=2376
- _PINGANDWARMREQUEST._serialized_end=2481
- _PINGANDWARMRESPONSE._serialized_start=2483
- _PINGANDWARMRESPONSE._serialized_end=2504
- _READMODIFYWRITEROWREQUEST._serialized_start=2507
- _READMODIFYWRITEROWREQUEST._serialized_end=2705
- _READMODIFYWRITEROWRESPONSE._serialized_start=2707
- _READMODIFYWRITEROWRESPONSE._serialized_end=2773
- _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST._serialized_start=2776
- _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST._serialized_end=2910
- _GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE._serialized_start=2912
- _GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE._serialized_end=3015
- _READCHANGESTREAMREQUEST._serialized_start=3018
- _READCHANGESTREAMREQUEST._serialized_end=3429
- _READCHANGESTREAMRESPONSE._serialized_start=3432
- _READCHANGESTREAMRESPONSE._serialized_end=4691
- _READCHANGESTREAMRESPONSE_MUTATIONCHUNK._serialized_start=3700
- _READCHANGESTREAMRESPONSE_MUTATIONCHUNK._serialized_end=3944
- _READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO._serialized_start=3855
- _READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO._serialized_end=3944
- _READCHANGESTREAMRESPONSE_DATACHANGE._serialized_start=3947
- _READCHANGESTREAMRESPONSE_DATACHANGE._serialized_end=4401
- _READCHANGESTREAMRESPONSE_DATACHANGE_TYPE._serialized_start=4321
- _READCHANGESTREAMRESPONSE_DATACHANGE_TYPE._serialized_end=4401
- _READCHANGESTREAMRESPONSE_HEARTBEAT._serialized_start=4404
- _READCHANGESTREAMRESPONSE_HEARTBEAT._serialized_end=4549
- _READCHANGESTREAMRESPONSE_CLOSESTREAM._serialized_start=4551
- _READCHANGESTREAMRESPONSE_CLOSESTREAM._serialized_end=4674
- _BIGTABLE._serialized_start=4694
- _BIGTABLE._serialized_end=7853
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.bigtable_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AP\n%bigtableadmin.googleapis.com/Instance\022\'projects/{project}/instances/{instance}\352A\\\n\"bigtableadmin.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}\352A\207\001\n+bigtableadmin.googleapis.com/AuthorizedView\022Xprojects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}\352A~\n-bigtableadmin.googleapis.com/MaterializedView\022Mprojects/{project}/instances/{instance}/materializedViews/{materialized_view}'
+ _globals['_READROWSREQUEST'].fields_by_name['table_name']._loaded_options = None
+ _globals['_READROWSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table'
+ _globals['_READROWSREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None
+ _globals['_READROWSREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView'
+ _globals['_READROWSREQUEST'].fields_by_name['materialized_view_name']._loaded_options = None
+ _globals['_READROWSREQUEST'].fields_by_name['materialized_view_name']._serialized_options = b'\340A\001\372A/\n-bigtableadmin.googleapis.com/MaterializedView'
+ _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['table_name']._loaded_options = None
+ _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table'
+ _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None
+ _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView'
+ _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['materialized_view_name']._loaded_options = None
+ _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['materialized_view_name']._serialized_options = b'\340A\001\372A/\n-bigtableadmin.googleapis.com/MaterializedView'
+ _globals['_MUTATEROWREQUEST'].fields_by_name['table_name']._loaded_options = None
+ _globals['_MUTATEROWREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table'
+ _globals['_MUTATEROWREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None
+ _globals['_MUTATEROWREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView'
+ _globals['_MUTATEROWREQUEST'].fields_by_name['row_key']._loaded_options = None
+ _globals['_MUTATEROWREQUEST'].fields_by_name['row_key']._serialized_options = b'\340A\002'
+ _globals['_MUTATEROWREQUEST'].fields_by_name['mutations']._loaded_options = None
+ _globals['_MUTATEROWREQUEST'].fields_by_name['mutations']._serialized_options = b'\340A\002'
+ _globals['_MUTATEROWSREQUEST_ENTRY'].fields_by_name['mutations']._loaded_options = None
+ _globals['_MUTATEROWSREQUEST_ENTRY'].fields_by_name['mutations']._serialized_options = b'\340A\002'
+ _globals['_MUTATEROWSREQUEST'].fields_by_name['table_name']._loaded_options = None
+ _globals['_MUTATEROWSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table'
+ _globals['_MUTATEROWSREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None
+ _globals['_MUTATEROWSREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView'
+ _globals['_MUTATEROWSREQUEST'].fields_by_name['entries']._loaded_options = None
+ _globals['_MUTATEROWSREQUEST'].fields_by_name['entries']._serialized_options = b'\340A\002'
+ _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['table_name']._loaded_options = None
+ _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table'
+ _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None
+ _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView'
+ _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['row_key']._loaded_options = None
+ _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['row_key']._serialized_options = b'\340A\002'
+ _globals['_PINGANDWARMREQUEST'].fields_by_name['name']._loaded_options = None
+ _globals['_PINGANDWARMREQUEST'].fields_by_name['name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance'
+ _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['table_name']._loaded_options = None
+ _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table'
+ _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None
+ _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView'
+ _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['row_key']._loaded_options = None
+ _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['row_key']._serialized_options = b'\340A\002'
+ _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['rules']._loaded_options = None
+ _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['rules']._serialized_options = b'\340A\002'
+ _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST'].fields_by_name['table_name']._loaded_options = None
+ _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table'
+ _globals['_READCHANGESTREAMREQUEST'].fields_by_name['table_name']._loaded_options = None
+ _globals['_READCHANGESTREAMREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table'
+ _globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._loaded_options = None
+ _globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._serialized_options = b'8\001'
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['instance_name']._loaded_options = None
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['instance_name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance'
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['app_profile_id']._loaded_options = None
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['app_profile_id']._serialized_options = b'\340A\001'
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['query']._loaded_options = None
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['query']._serialized_options = b'\030\001\340A\002'
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['proto_format']._loaded_options = None
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['proto_format']._serialized_options = b'\030\001'
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['resume_token']._loaded_options = None
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['resume_token']._serialized_options = b'\340A\001'
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['params']._loaded_options = None
+ _globals['_EXECUTEQUERYREQUEST'].fields_by_name['params']._serialized_options = b'\340A\002'
+ _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._loaded_options = None
+ _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._serialized_options = b'8\001'
+ _globals['_PREPAREQUERYREQUEST'].fields_by_name['instance_name']._loaded_options = None
+ _globals['_PREPAREQUERYREQUEST'].fields_by_name['instance_name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance'
+ _globals['_PREPAREQUERYREQUEST'].fields_by_name['app_profile_id']._loaded_options = None
+ _globals['_PREPAREQUERYREQUEST'].fields_by_name['app_profile_id']._serialized_options = b'\340A\001'
+ _globals['_PREPAREQUERYREQUEST'].fields_by_name['query']._loaded_options = None
+ _globals['_PREPAREQUERYREQUEST'].fields_by_name['query']._serialized_options = b'\340A\002'
+ _globals['_PREPAREQUERYREQUEST'].fields_by_name['param_types']._loaded_options = None
+ _globals['_PREPAREQUERYREQUEST'].fields_by_name['param_types']._serialized_options = b'\340A\002'
+ _globals['_BIGTABLE']._loaded_options = None
+ _globals['_BIGTABLE']._serialized_options = b'\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only'
+ _globals['_BIGTABLE'].methods_by_name['ReadRows']._loaded_options = None
+ _globals['_BIGTABLE'].methods_by_name['ReadRows']._serialized_options = b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002\232\001\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*ZZ\"U/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}'
+ _globals['_BIGTABLE'].methods_by_name['SampleRowKeys']._loaded_options = None
+ _globals['_BIGTABLE'].methods_by_name['SampleRowKeys']._serialized_options = b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002\236\001\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeysZ\\\022Z/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}'
+ _globals['_BIGTABLE'].methods_by_name['MutateRow']._loaded_options = None
+ _globals['_BIGTABLE'].methods_by_name['MutateRow']._serialized_options = b'\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id\202\323\344\223\002\234\001\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*Z[\"V/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRow:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}'
+ _globals['_BIGTABLE'].methods_by_name['MutateRows']._loaded_options = None
+ _globals['_BIGTABLE'].methods_by_name['MutateRows']._serialized_options = b'\332A\022table_name,entries\332A!table_name,entries,app_profile_id\202\323\344\223\002\236\001\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*Z\\\"W/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRows:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}'
+ _globals['_BIGTABLE'].methods_by_name['CheckAndMutateRow']._loaded_options = None
+ _globals['_BIGTABLE'].methods_by_name['CheckAndMutateRow']._serialized_options = b'\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\202\323\344\223\002\254\001\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*Zc\"^/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:checkAndMutateRow:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}'
+ _globals['_BIGTABLE'].methods_by_name['PingAndWarm']._loaded_options = None
+ _globals['_BIGTABLE'].methods_by_name['PingAndWarm']._serialized_options = b'\332A\004name\332A\023name,app_profile_id\202\323\344\223\002+\"&/v2/{name=projects/*/instances/*}:ping:\001*\212\323\344\223\0029\022%\n\004name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id'
+ _globals['_BIGTABLE'].methods_by_name['ReadModifyWriteRow']._loaded_options = None
+ _globals['_BIGTABLE'].methods_by_name['ReadModifyWriteRow']._serialized_options = b'\332A\030table_name,row_key,rules\332A\'table_name,row_key,rules,app_profile_id\202\323\344\223\002\256\001\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*Zd\"_/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readModifyWriteRow:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}'
+ _globals['_BIGTABLE'].methods_by_name['GenerateInitialChangeStreamPartitions']._loaded_options = None
+ _globals['_BIGTABLE'].methods_by_name['GenerateInitialChangeStreamPartitions']._serialized_options = b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\001*'
+ _globals['_BIGTABLE'].methods_by_name['ReadChangeStream']._loaded_options = None
+ _globals['_BIGTABLE'].methods_by_name['ReadChangeStream']._serialized_options = b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002F\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\001*'
+ _globals['_BIGTABLE'].methods_by_name['PrepareQuery']._loaded_options = None
+ _globals['_BIGTABLE'].methods_by_name['PrepareQuery']._serialized_options = b'\332A\023instance_name,query\332A\"instance_name,query,app_profile_id\202\323\344\223\002<\"7/v2/{instance_name=projects/*/instances/*}:prepareQuery:\001*\212\323\344\223\002B\022.\n\rinstance_name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id'
+ _globals['_BIGTABLE'].methods_by_name['ExecuteQuery']._loaded_options = None
+ _globals['_BIGTABLE'].methods_by_name['ExecuteQuery']._serialized_options = b'\332A\023instance_name,query\332A\"instance_name,query,app_profile_id\202\323\344\223\002<\"7/v2/{instance_name=projects/*/instances/*}:executeQuery:\001*\212\323\344\223\002B\022.\n\rinstance_name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id'
+ _globals['_READROWSREQUEST']._serialized_start=424
+ _globals['_READROWSREQUEST']._serialized_end=1012
+ _globals['_READROWSREQUEST_REQUESTSTATSVIEW']._serialized_start=910
+ _globals['_READROWSREQUEST_REQUESTSTATSVIEW']._serialized_end=1012
+ _globals['_READROWSRESPONSE']._serialized_start=1015
+ _globals['_READROWSRESPONSE']._serialized_end=1448
+ _globals['_READROWSRESPONSE_CELLCHUNK']._serialized_start=1187
+ _globals['_READROWSRESPONSE_CELLCHUNK']._serialized_end=1448
+ _globals['_SAMPLEROWKEYSREQUEST']._serialized_start=1451
+ _globals['_SAMPLEROWKEYSREQUEST']._serialized_end=1731
+ _globals['_SAMPLEROWKEYSRESPONSE']._serialized_start=1733
+ _globals['_SAMPLEROWKEYSRESPONSE']._serialized_end=1795
+ _globals['_MUTATEROWREQUEST']._serialized_start=1798
+ _globals['_MUTATEROWREQUEST']._serialized_end=2063
+ _globals['_MUTATEROWRESPONSE']._serialized_start=2065
+ _globals['_MUTATEROWRESPONSE']._serialized_end=2084
+ _globals['_MUTATEROWSREQUEST']._serialized_start=2087
+ _globals['_MUTATEROWSREQUEST']._serialized_end=2424
+ _globals['_MUTATEROWSREQUEST_ENTRY']._serialized_start=2346
+ _globals['_MUTATEROWSREQUEST_ENTRY']._serialized_end=2424
+ _globals['_MUTATEROWSRESPONSE']._serialized_start=2427
+ _globals['_MUTATEROWSRESPONSE']._serialized_end=2655
+ _globals['_MUTATEROWSRESPONSE_ENTRY']._serialized_start=2577
+ _globals['_MUTATEROWSRESPONSE_ENTRY']._serialized_end=2635
+ _globals['_RATELIMITINFO']._serialized_start=2657
+ _globals['_RATELIMITINFO']._serialized_end=2731
+ _globals['_CHECKANDMUTATEROWREQUEST']._serialized_start=2734
+ _globals['_CHECKANDMUTATEROWREQUEST']._serialized_end=3119
+ _globals['_CHECKANDMUTATEROWRESPONSE']._serialized_start=3121
+ _globals['_CHECKANDMUTATEROWRESPONSE']._serialized_end=3175
+ _globals['_PINGANDWARMREQUEST']._serialized_start=3177
+ _globals['_PINGANDWARMREQUEST']._serialized_end=3282
+ _globals['_PINGANDWARMRESPONSE']._serialized_start=3284
+ _globals['_PINGANDWARMRESPONSE']._serialized_end=3305
+ _globals['_READMODIFYWRITEROWREQUEST']._serialized_start=3308
+ _globals['_READMODIFYWRITEROWREQUEST']._serialized_end=3589
+ _globals['_READMODIFYWRITEROWRESPONSE']._serialized_start=3591
+ _globals['_READMODIFYWRITEROWRESPONSE']._serialized_end=3657
+ _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST']._serialized_start=3660
+ _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST']._serialized_end=3794
+ _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE']._serialized_start=3796
+ _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE']._serialized_end=3899
+ _globals['_READCHANGESTREAMREQUEST']._serialized_start=3902
+ _globals['_READCHANGESTREAMREQUEST']._serialized_end=4313
+ _globals['_READCHANGESTREAMRESPONSE']._serialized_start=4316
+ _globals['_READCHANGESTREAMRESPONSE']._serialized_end=5637
+ _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK']._serialized_start=4584
+ _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK']._serialized_end=4828
+ _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO']._serialized_start=4739
+ _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO']._serialized_end=4828
+ _globals['_READCHANGESTREAMRESPONSE_DATACHANGE']._serialized_start=4831
+ _globals['_READCHANGESTREAMRESPONSE_DATACHANGE']._serialized_end=5285
+ _globals['_READCHANGESTREAMRESPONSE_DATACHANGE_TYPE']._serialized_start=5205
+ _globals['_READCHANGESTREAMRESPONSE_DATACHANGE_TYPE']._serialized_end=5285
+ _globals['_READCHANGESTREAMRESPONSE_HEARTBEAT']._serialized_start=5288
+ _globals['_READCHANGESTREAMRESPONSE_HEARTBEAT']._serialized_end=5433
+ _globals['_READCHANGESTREAMRESPONSE_CLOSESTREAM']._serialized_start=5436
+ _globals['_READCHANGESTREAMRESPONSE_CLOSESTREAM']._serialized_end=5620
+ _globals['_EXECUTEQUERYREQUEST']._serialized_start=5640
+ _globals['_EXECUTEQUERYREQUEST']._serialized_end=6057
+ _globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._serialized_start=5970
+ _globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._serialized_end=6042
+ _globals['_EXECUTEQUERYRESPONSE']._serialized_start=6060
+ _globals['_EXECUTEQUERYRESPONSE']._serialized_end=6210
+ _globals['_PREPAREQUERYREQUEST']._serialized_start=6213
+ _globals['_PREPAREQUERYREQUEST']._serialized_end=6585
+ _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._serialized_start=6495
+ _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._serialized_end=6570
+ _globals['_PREPAREQUERYRESPONSE']._serialized_start=6588
+ _globals['_PREPAREQUERYRESPONSE']._serialized_end=6740
+ _globals['_BIGTABLE']._serialized_start=6743
+ _globals['_BIGTABLE']._serialized_end=11680
# @@protoc_insertion_point(module_scope)
diff --git a/test_proxy/protos/bigtable_pb2_grpc.py b/test_proxy/protos/bigtable_pb2_grpc.py
index 9ce87d869..ef4e5bed6 100644
--- a/test_proxy/protos/bigtable_pb2_grpc.py
+++ b/test_proxy/protos/bigtable_pb2_grpc.py
@@ -1,9 +1,29 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
+import warnings
import bigtable_pb2 as google_dot_bigtable_dot_v2_dot_bigtable__pb2
+GRPC_GENERATED_VERSION = '1.70.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in google/bigtable/v2/bigtable_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
+
class BigtableStub(object):
"""Service for reading from and writing to existing Bigtable tables.
@@ -19,47 +39,57 @@ def __init__(self, channel):
'/google.bigtable.v2.Bigtable/ReadRows',
request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.FromString,
- )
+ _registered_method=True)
self.SampleRowKeys = channel.unary_stream(
'/google.bigtable.v2.Bigtable/SampleRowKeys',
request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.FromString,
- )
+ _registered_method=True)
self.MutateRow = channel.unary_unary(
'/google.bigtable.v2.Bigtable/MutateRow',
request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.FromString,
- )
+ _registered_method=True)
self.MutateRows = channel.unary_stream(
'/google.bigtable.v2.Bigtable/MutateRows',
request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.FromString,
- )
+ _registered_method=True)
self.CheckAndMutateRow = channel.unary_unary(
'/google.bigtable.v2.Bigtable/CheckAndMutateRow',
request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString,
- )
+ _registered_method=True)
self.PingAndWarm = channel.unary_unary(
'/google.bigtable.v2.Bigtable/PingAndWarm',
request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.FromString,
- )
+ _registered_method=True)
self.ReadModifyWriteRow = channel.unary_unary(
'/google.bigtable.v2.Bigtable/ReadModifyWriteRow',
request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString,
- )
+ _registered_method=True)
self.GenerateInitialChangeStreamPartitions = channel.unary_stream(
'/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions',
request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.FromString,
- )
+ _registered_method=True)
self.ReadChangeStream = channel.unary_stream(
'/google.bigtable.v2.Bigtable/ReadChangeStream',
request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.FromString,
- )
+ _registered_method=True)
+ self.PrepareQuery = channel.unary_unary(
+ '/google.bigtable.v2.Bigtable/PrepareQuery',
+ request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryRequest.SerializeToString,
+ response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryResponse.FromString,
+ _registered_method=True)
+ self.ExecuteQuery = channel.unary_stream(
+ '/google.bigtable.v2.Bigtable/ExecuteQuery',
+ request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryRequest.SerializeToString,
+ response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryResponse.FromString,
+ _registered_method=True)
class BigtableServicer(object):
@@ -150,6 +180,20 @@ def ReadChangeStream(self, request, context):
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
+ def PrepareQuery(self, request, context):
+ """Prepares a GoogleSQL query for execution on a particular Bigtable instance.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def ExecuteQuery(self, request, context):
+ """Executes a SQL query against a particular Bigtable instance.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
def add_BigtableServicer_to_server(servicer, server):
rpc_method_handlers = {
@@ -198,10 +242,21 @@ def add_BigtableServicer_to_server(servicer, server):
request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.FromString,
response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.SerializeToString,
),
+ 'PrepareQuery': grpc.unary_unary_rpc_method_handler(
+ servicer.PrepareQuery,
+ request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryRequest.FromString,
+ response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryResponse.SerializeToString,
+ ),
+ 'ExecuteQuery': grpc.unary_stream_rpc_method_handler(
+ servicer.ExecuteQuery,
+ request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryRequest.FromString,
+ response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryResponse.SerializeToString,
+ ),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.bigtable.v2.Bigtable', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
+ server.add_registered_method_handlers('google.bigtable.v2.Bigtable', rpc_method_handlers)
# This class is part of an EXPERIMENTAL API.
@@ -220,11 +275,21 @@ def ReadRows(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/ReadRows',
+ return grpc.experimental.unary_stream(
+ request,
+ target,
+ '/google.bigtable.v2.Bigtable/ReadRows',
google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.SerializeToString,
google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def SampleRowKeys(request,
@@ -237,11 +302,21 @@ def SampleRowKeys(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/SampleRowKeys',
+ return grpc.experimental.unary_stream(
+ request,
+ target,
+ '/google.bigtable.v2.Bigtable/SampleRowKeys',
google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString,
google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def MutateRow(request,
@@ -254,11 +329,21 @@ def MutateRow(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/MutateRow',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.v2.Bigtable/MutateRow',
google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.SerializeToString,
google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def MutateRows(request,
@@ -271,11 +356,21 @@ def MutateRows(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/MutateRows',
+ return grpc.experimental.unary_stream(
+ request,
+ target,
+ '/google.bigtable.v2.Bigtable/MutateRows',
google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.SerializeToString,
google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def CheckAndMutateRow(request,
@@ -288,11 +383,21 @@ def CheckAndMutateRow(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/CheckAndMutateRow',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.v2.Bigtable/CheckAndMutateRow',
google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString,
google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def PingAndWarm(request,
@@ -305,11 +410,21 @@ def PingAndWarm(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/PingAndWarm',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.v2.Bigtable/PingAndWarm',
google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.SerializeToString,
google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def ReadModifyWriteRow(request,
@@ -322,11 +437,21 @@ def ReadModifyWriteRow(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/ReadModifyWriteRow',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.v2.Bigtable/ReadModifyWriteRow',
google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString,
google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def GenerateInitialChangeStreamPartitions(request,
@@ -339,11 +464,21 @@ def GenerateInitialChangeStreamPartitions(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions',
+ return grpc.experimental.unary_stream(
+ request,
+ target,
+ '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions',
google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.SerializeToString,
google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def ReadChangeStream(request,
@@ -356,8 +491,72 @@ def ReadChangeStream(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/ReadChangeStream',
+ return grpc.experimental.unary_stream(
+ request,
+ target,
+ '/google.bigtable.v2.Bigtable/ReadChangeStream',
google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.SerializeToString,
google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def PrepareQuery(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.v2.Bigtable/PrepareQuery',
+ google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryRequest.SerializeToString,
+ google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def ExecuteQuery(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_stream(
+ request,
+ target,
+ '/google.bigtable.v2.Bigtable/ExecuteQuery',
+ google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryRequest.SerializeToString,
+ google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryResponse.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
diff --git a/test_proxy/protos/data_pb2.py b/test_proxy/protos/data_pb2.py
index fff212034..8b6e68df1 100644
--- a/test_proxy/protos/data_pb2.py
+++ b/test_proxy/protos/data_pb2.py
@@ -1,68 +1,105 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
# source: google/bigtable/v2/data.proto
+# Protobuf Python Version: 5.29.0
"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'google/bigtable/v2/data.proto'
+)
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
+from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
+import types_pb2 as google_dot_bigtable_dot_v2_dot_types__pb2
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+from google.type import date_pb2 as google_dot_type_dot_date__pb2
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 
\x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04rule\"B\n\x0fStreamPartition\x12/\n\trow_range\x18\x01 \x01(\x0b\x32\x1c.google.bigtable.v2.RowRange\"W\n\x18StreamContinuationTokens\x12;\n\x06tokens\x18\x01 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\"`\n\x17StreamContinuationToken\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\r\n\x05token\x18\x02 \x01(\tB\xb5\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.data_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1egoogle/bigtable/v2/types.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x16google/type/date.proto\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\xf4\x02\n\x05Value\x12&\n\x04type\x18\x07 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x12\x13\n\traw_value\x18\x08 \x01(\x0cH\x00\x12\x1e\n\x14raw_timestamp_micros\x18\t \x01(\x03H\x00\x12\x15\n\x0b\x62ytes_value\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0cstring_value\x18\x03 \x01(\tH\x00\x12\x13\n\tint_value\x18\x06 \x01(\x03H\x00\x12\x14\n\nbool_value\x18\n \x01(\x08H\x00\x12\x15\n\x0b\x66loat_value\x18\x0b \x01(\x01H\x00\x12\x35\n\x0ftimestamp_value\x18\x0c \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\'\n\ndate_value\x18\r \x01(\x0b\x32\x11.google.type.DateH\x00\x12\x35\n\x0b\x61rray_value\x18\x04 \x01(\x0b\x32\x1e.google.bigtable.v2.ArrayValueH\x00\x42\x06\n\x04kind\"7\n\nArrayValue\x12)\n\x06values\x18\x01 \x03(\x0b\x32\x19.google.bigtable.v2.Value\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 
\x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xad\x08\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12=\n\x0b\x61\x64\x64_to_cell\x18\x05 \x01(\x0b\x32&.google.bigtable.v2.Mutation.AddToCellH\x00\x12\x41\n\rmerge_to_cell\x18\x06 \x01(\x0b\x32(.google.bigtable.v2.Mutation.MergeToCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1a\xad\x01\n\tAddToCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x33\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12,\n\ttimestamp\x18\x03 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12(\n\x05input\x18\x04 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x1a\xaf\x01\n\x0bMergeToCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x33\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12,\n\ttimestamp\x18\x03 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12(\n\x05input\x18\x04 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04rule\"B\n\x0fStreamPartition\x12/\n\trow_range\x18\x01 \x01(\x0b\x32\x1c.google.bigtable.v2.RowRange\"W\n\x18StreamContinuationTokens\x12;\n\x06tokens\x18\x01 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\"`\n\x17StreamContinuationToken\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\r\n\x05token\x18\x02 \x01(\t\"\r\n\x0bProtoFormat\"F\n\x0e\x43olumnMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x04type\x18\x02 
\x01(\x0b\x32\x18.google.bigtable.v2.Type\"B\n\x0bProtoSchema\x12\x33\n\x07\x63olumns\x18\x01 \x03(\x0b\x32\".google.bigtable.v2.ColumnMetadata\"V\n\x11ResultSetMetadata\x12\x37\n\x0cproto_schema\x18\x01 \x01(\x0b\x32\x1f.google.bigtable.v2.ProtoSchemaH\x00\x42\x08\n\x06schema\"6\n\tProtoRows\x12)\n\x06values\x18\x02 \x03(\x0b\x32\x19.google.bigtable.v2.Value\"$\n\x0eProtoRowsBatch\x12\x12\n\nbatch_data\x18\x01 \x01(\x0c\"\xd5\x01\n\x10PartialResultSet\x12>\n\x10proto_rows_batch\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.ProtoRowsBatchH\x00\x12\x1b\n\x0e\x62\x61tch_checksum\x18\x06 \x01(\rH\x01\x88\x01\x01\x12\x14\n\x0cresume_token\x18\x05 \x01(\x0c\x12\r\n\x05reset\x18\x07 \x01(\x08\x12\x1c\n\x14\x65stimated_batch_size\x18\x04 \x01(\x05\x42\x0e\n\x0cpartial_rowsB\x11\n\x0f_batch_checksumB\xb3\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3')
- DESCRIPTOR._options = None
- DESCRIPTOR._serialized_options = b'\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2'
- _ROW._serialized_start=53
- _ROW._serialized_end=117
- _FAMILY._serialized_start=119
- _FAMILY._serialized_end=186
- _COLUMN._serialized_start=188
- _COLUMN._serialized_end=256
- _CELL._serialized_start=258
- _CELL._serialized_end=321
- _ROWRANGE._serialized_start=324
- _ROWRANGE._serialized_end=462
- _ROWSET._serialized_start=464
- _ROWSET._serialized_end=540
- _COLUMNRANGE._serialized_start=543
- _COLUMNRANGE._serialized_end=741
- _TIMESTAMPRANGE._serialized_start=743
- _TIMESTAMPRANGE._serialized_end=821
- _VALUERANGE._serialized_start=824
- _VALUERANGE._serialized_end=976
- _ROWFILTER._serialized_start=979
- _ROWFILTER._serialized_end=2098
- _ROWFILTER_CHAIN._serialized_start=1795
- _ROWFILTER_CHAIN._serialized_end=1850
- _ROWFILTER_INTERLEAVE._serialized_start=1852
- _ROWFILTER_INTERLEAVE._serialized_end=1912
- _ROWFILTER_CONDITION._serialized_start=1915
- _ROWFILTER_CONDITION._serialized_end=2088
- _MUTATION._serialized_start=2101
- _MUTATION._serialized_end=2686
- _MUTATION_SETCELL._serialized_start=2396
- _MUTATION_SETCELL._serialized_end=2493
- _MUTATION_DELETEFROMCOLUMN._serialized_start=2495
- _MUTATION_DELETEFROMCOLUMN._serialized_end=2616
- _MUTATION_DELETEFROMFAMILY._serialized_start=2618
- _MUTATION_DELETEFROMFAMILY._serialized_end=2657
- _MUTATION_DELETEFROMROW._serialized_start=2659
- _MUTATION_DELETEFROMROW._serialized_end=2674
- _READMODIFYWRITERULE._serialized_start=2689
- _READMODIFYWRITERULE._serialized_end=2817
- _STREAMPARTITION._serialized_start=2819
- _STREAMPARTITION._serialized_end=2885
- _STREAMCONTINUATIONTOKENS._serialized_start=2887
- _STREAMCONTINUATIONTOKENS._serialized_end=2974
- _STREAMCONTINUATIONTOKEN._serialized_start=2976
- _STREAMCONTINUATIONTOKEN._serialized_end=3072
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.data_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'\n\026com.google.bigtable.v2B\tDataProtoP\001Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2'
+ _globals['_ROW']._serialized_start=175
+ _globals['_ROW']._serialized_end=239
+ _globals['_FAMILY']._serialized_start=241
+ _globals['_FAMILY']._serialized_end=308
+ _globals['_COLUMN']._serialized_start=310
+ _globals['_COLUMN']._serialized_end=378
+ _globals['_CELL']._serialized_start=380
+ _globals['_CELL']._serialized_end=443
+ _globals['_VALUE']._serialized_start=446
+ _globals['_VALUE']._serialized_end=818
+ _globals['_ARRAYVALUE']._serialized_start=820
+ _globals['_ARRAYVALUE']._serialized_end=875
+ _globals['_ROWRANGE']._serialized_start=878
+ _globals['_ROWRANGE']._serialized_end=1016
+ _globals['_ROWSET']._serialized_start=1018
+ _globals['_ROWSET']._serialized_end=1094
+ _globals['_COLUMNRANGE']._serialized_start=1097
+ _globals['_COLUMNRANGE']._serialized_end=1295
+ _globals['_TIMESTAMPRANGE']._serialized_start=1297
+ _globals['_TIMESTAMPRANGE']._serialized_end=1375
+ _globals['_VALUERANGE']._serialized_start=1378
+ _globals['_VALUERANGE']._serialized_end=1530
+ _globals['_ROWFILTER']._serialized_start=1533
+ _globals['_ROWFILTER']._serialized_end=2652
+ _globals['_ROWFILTER_CHAIN']._serialized_start=2349
+ _globals['_ROWFILTER_CHAIN']._serialized_end=2404
+ _globals['_ROWFILTER_INTERLEAVE']._serialized_start=2406
+ _globals['_ROWFILTER_INTERLEAVE']._serialized_end=2466
+ _globals['_ROWFILTER_CONDITION']._serialized_start=2469
+ _globals['_ROWFILTER_CONDITION']._serialized_end=2642
+ _globals['_MUTATION']._serialized_start=2655
+ _globals['_MUTATION']._serialized_end=3724
+ _globals['_MUTATION_SETCELL']._serialized_start=3080
+ _globals['_MUTATION_SETCELL']._serialized_end=3177
+ _globals['_MUTATION_ADDTOCELL']._serialized_start=3180
+ _globals['_MUTATION_ADDTOCELL']._serialized_end=3353
+ _globals['_MUTATION_MERGETOCELL']._serialized_start=3356
+ _globals['_MUTATION_MERGETOCELL']._serialized_end=3531
+ _globals['_MUTATION_DELETEFROMCOLUMN']._serialized_start=3533
+ _globals['_MUTATION_DELETEFROMCOLUMN']._serialized_end=3654
+ _globals['_MUTATION_DELETEFROMFAMILY']._serialized_start=3656
+ _globals['_MUTATION_DELETEFROMFAMILY']._serialized_end=3695
+ _globals['_MUTATION_DELETEFROMROW']._serialized_start=3697
+ _globals['_MUTATION_DELETEFROMROW']._serialized_end=3712
+ _globals['_READMODIFYWRITERULE']._serialized_start=3727
+ _globals['_READMODIFYWRITERULE']._serialized_end=3855
+ _globals['_STREAMPARTITION']._serialized_start=3857
+ _globals['_STREAMPARTITION']._serialized_end=3923
+ _globals['_STREAMCONTINUATIONTOKENS']._serialized_start=3925
+ _globals['_STREAMCONTINUATIONTOKENS']._serialized_end=4012
+ _globals['_STREAMCONTINUATIONTOKEN']._serialized_start=4014
+ _globals['_STREAMCONTINUATIONTOKEN']._serialized_end=4110
+ _globals['_PROTOFORMAT']._serialized_start=4112
+ _globals['_PROTOFORMAT']._serialized_end=4125
+ _globals['_COLUMNMETADATA']._serialized_start=4127
+ _globals['_COLUMNMETADATA']._serialized_end=4197
+ _globals['_PROTOSCHEMA']._serialized_start=4199
+ _globals['_PROTOSCHEMA']._serialized_end=4265
+ _globals['_RESULTSETMETADATA']._serialized_start=4267
+ _globals['_RESULTSETMETADATA']._serialized_end=4353
+ _globals['_PROTOROWS']._serialized_start=4355
+ _globals['_PROTOROWS']._serialized_end=4409
+ _globals['_PROTOROWSBATCH']._serialized_start=4411
+ _globals['_PROTOROWSBATCH']._serialized_end=4447
+ _globals['_PARTIALRESULTSET']._serialized_start=4450
+ _globals['_PARTIALRESULTSET']._serialized_end=4663
# @@protoc_insertion_point(module_scope)
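
Besides the option and runtime-version changes, the regenerated data_pb2 descriptor above now includes the SQL-oriented Value and ArrayValue messages. A small construction sketch, assuming the flat test_proxy/protos layout where the module imports as data_pb2; the values are placeholders:

    import data_pb2

    # Exactly one member of the "kind" oneof is set per Value.
    text = data_pb2.Value(string_value='hello')
    numbers = data_pb2.Value(
        array_value=data_pb2.ArrayValue(
            values=[data_pb2.Value(int_value=1), data_pb2.Value(int_value=2)]))
    print(text.WhichOneof('kind'), numbers.WhichOneof('kind'))
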
diff --git a/test_proxy/protos/data_pb2_grpc.py b/test_proxy/protos/data_pb2_grpc.py
index 2daafffeb..f7a5195e8 100644
--- a/test_proxy/protos/data_pb2_grpc.py
+++ b/test_proxy/protos/data_pb2_grpc.py
@@ -1,4 +1,24 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
+import warnings
+
+GRPC_GENERATED_VERSION = '1.70.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in google/bigtable/v2/data_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
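
The import-time guard above is self-contained; a standalone sketch of the same pattern, restructured as a helper for readability (the version constant and error wording mirror the generated module, the function name is illustrative):

    import grpc

    GRPC_GENERATED_VERSION = '1.70.0'  # version stamped in by grpcio-tools
    GRPC_VERSION = grpc.__version__

    def _gencode_supported() -> bool:
        # Releases old enough to lack the private helper are also too old
        # for code generated against 1.70.0, so treat ImportError as failure.
        try:
            from grpc._utilities import first_version_is_lower
        except ImportError:
            return False
        return not first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)

    if not _gencode_supported():
        raise RuntimeError(
            f'grpcio {GRPC_VERSION} is older than the generated code requires '
            f'({GRPC_GENERATED_VERSION}); upgrade grpcio or regenerate with '
            f'grpcio-tools<={GRPC_VERSION}.')
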
diff --git a/test_proxy/protos/test_proxy_pb2.py b/test_proxy/protos/test_proxy_pb2.py
index 8c7817b14..1f85b086b 100644
--- a/test_proxy/protos/test_proxy_pb2.py
+++ b/test_proxy/protos/test_proxy_pb2.py
@@ -1,11 +1,22 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
# source: test_proxy.proto
+# Protobuf Python Version: 5.29.0
"""Generated protocol buffer code."""
-from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'test_proxy.proto'
+)
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
@@ -18,54 +29,66 @@
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10test_proxy.proto\x12\x19google.bigtable.testproxy\x1a\x17google/api/client.proto\x1a!google/bigtable/v2/bigtable.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x17google/rpc/status.proto\"\xb8\x01\n\x13\x43reateClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x61ta_target\x18\x02 \x01(\t\x12\x12\n\nproject_id\x18\x03 \x01(\t\x12\x13\n\x0binstance_id\x18\x04 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12\x38\n\x15per_operation_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x16\n\x14\x43reateClientResponse\"\'\n\x12\x43loseClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x15\n\x13\x43loseClientResponse\"(\n\x13RemoveClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x16\n\x14RemoveClientResponse\"w\n\x0eReadRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x12\n\ntable_name\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\t\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\"U\n\tRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12$\n\x03row\x18\x02 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"u\n\x0fReadRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x34\n\x07request\x18\x02 \x01(\x0b\x32#.google.bigtable.v2.ReadRowsRequest\x12\x19\n\x11\x63\x61ncel_after_rows\x18\x03 \x01(\x05\"V\n\nRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12$\n\x03row\x18\x02 \x03(\x0b\x32\x17.google.bigtable.v2.Row\"\\\n\x10MutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x35\n\x07request\x18\x02 \x01(\x0b\x32$.google.bigtable.v2.MutateRowRequest\"5\n\x0fMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\"^\n\x11MutateRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x36\n\x07request\x18\x02 \x01(\x0b\x32%.google.bigtable.v2.MutateRowsRequest\"s\n\x10MutateRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12;\n\x05\x65ntry\x18\x02 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\"l\n\x18\x43heckAndMutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12=\n\x07request\x18\x02 \x01(\x0b\x32,.google.bigtable.v2.CheckAndMutateRowRequest\"|\n\x17\x43heckAndMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12=\n\x06result\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.CheckAndMutateRowResponse\"d\n\x14SampleRowKeysRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x39\n\x07request\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.SampleRowKeysRequest\"t\n\x13SampleRowKeysResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x39\n\x06sample\x18\x02 \x03(\x0b\x32).google.bigtable.v2.SampleRowKeysResponse\"n\n\x19ReadModifyWriteRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12>\n\x07request\x18\x02 
\x01(\x0b\x32-.google.bigtable.v2.ReadModifyWriteRowRequest2\xa4\t\n\x18\x43loudBigtableV2TestProxy\x12q\n\x0c\x43reateClient\x12..google.bigtable.testproxy.CreateClientRequest\x1a/.google.bigtable.testproxy.CreateClientResponse\"\x00\x12n\n\x0b\x43loseClient\x12-.google.bigtable.testproxy.CloseClientRequest\x1a..google.bigtable.testproxy.CloseClientResponse\"\x00\x12q\n\x0cRemoveClient\x12..google.bigtable.testproxy.RemoveClientRequest\x1a/.google.bigtable.testproxy.RemoveClientResponse\"\x00\x12\\\n\x07ReadRow\x12).google.bigtable.testproxy.ReadRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x12_\n\x08ReadRows\x12*.google.bigtable.testproxy.ReadRowsRequest\x1a%.google.bigtable.testproxy.RowsResult\"\x00\x12\x66\n\tMutateRow\x12+.google.bigtable.testproxy.MutateRowRequest\x1a*.google.bigtable.testproxy.MutateRowResult\"\x00\x12m\n\x0e\x42ulkMutateRows\x12,.google.bigtable.testproxy.MutateRowsRequest\x1a+.google.bigtable.testproxy.MutateRowsResult\"\x00\x12~\n\x11\x43heckAndMutateRow\x12\x33.google.bigtable.testproxy.CheckAndMutateRowRequest\x1a\x32.google.bigtable.testproxy.CheckAndMutateRowResult\"\x00\x12r\n\rSampleRowKeys\x12/.google.bigtable.testproxy.SampleRowKeysRequest\x1a..google.bigtable.testproxy.SampleRowKeysResult\"\x00\x12r\n\x12ReadModifyWriteRow\x12\x34.google.bigtable.testproxy.ReadModifyWriteRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x1a\x34\xca\x41\x31\x62igtable-test-proxy-not-accessible.googleapis.comB6\n#com.google.cloud.bigtable.testproxyP\x01Z\r./testproxypbb\x06proto3')
-
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'test_proxy_pb2', globals())
-if _descriptor._USE_C_DESCRIPTORS == False:
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10test_proxy.proto\x12\x19google.bigtable.testproxy\x1a\x17google/api/client.proto\x1a!google/bigtable/v2/bigtable.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x17google/rpc/status.proto\"\xda\x03\n\x13\x43reateClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x61ta_target\x18\x02 \x01(\t\x12\x12\n\nproject_id\x18\x03 \x01(\t\x12\x13\n\x0binstance_id\x18\x04 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12\x38\n\x15per_operation_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12Q\n\x17optional_feature_config\x18\x07 \x01(\x0e\x32\x30.google.bigtable.testproxy.OptionalFeatureConfig\x12X\n\x10security_options\x18\x08 \x01(\x0b\x32>.google.bigtable.testproxy.CreateClientRequest.SecurityOptions\x1as\n\x0fSecurityOptions\x12\x14\n\x0c\x61\x63\x63\x65ss_token\x18\x01 \x01(\t\x12\x0f\n\x07use_ssl\x18\x02 \x01(\x08\x12\x1d\n\x15ssl_endpoint_override\x18\x03 \x01(\t\x12\x1a\n\x12ssl_root_certs_pem\x18\x04 \x01(\t\"\x16\n\x14\x43reateClientResponse\"\'\n\x12\x43loseClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x15\n\x13\x43loseClientResponse\"(\n\x13RemoveClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x16\n\x14RemoveClientResponse\"w\n\x0eReadRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x12\n\ntable_name\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\t\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\"U\n\tRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12$\n\x03row\x18\x02 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"u\n\x0fReadRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x34\n\x07request\x18\x02 \x01(\x0b\x32#.google.bigtable.v2.ReadRowsRequest\x12\x19\n\x11\x63\x61ncel_after_rows\x18\x03 \x01(\x05\"W\n\nRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12%\n\x04rows\x18\x02 \x03(\x0b\x32\x17.google.bigtable.v2.Row\"\\\n\x10MutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x35\n\x07request\x18\x02 \x01(\x0b\x32$.google.bigtable.v2.MutateRowRequest\"5\n\x0fMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\"^\n\x11MutateRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x36\n\x07request\x18\x02 \x01(\x0b\x32%.google.bigtable.v2.MutateRowsRequest\"u\n\x10MutateRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12=\n\x07\x65ntries\x18\x02 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\"l\n\x18\x43heckAndMutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12=\n\x07request\x18\x02 \x01(\x0b\x32,.google.bigtable.v2.CheckAndMutateRowRequest\"|\n\x17\x43heckAndMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12=\n\x06result\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.CheckAndMutateRowResponse\"d\n\x14SampleRowKeysRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x39\n\x07request\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.SampleRowKeysRequest\"u\n\x13SampleRowKeysResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12:\n\x07samples\x18\x02 \x03(\x0b\x32).google.bigtable.v2.SampleRowKeysResponse\"n\n\x19ReadModifyWriteRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12>\n\x07request\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.ReadModifyWriteRowRequest\"b\n\x13\x45xecuteQueryRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x38\n\x07request\x18\x02 
\x01(\x0b\x32\'.google.bigtable.v2.ExecuteQueryRequest\"\xa9\x01\n\x12\x45xecuteQueryResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12>\n\x08metadata\x18\x04 \x01(\x0b\x32,.google.bigtable.testproxy.ResultSetMetadata\x12/\n\x04rows\x18\x03 \x03(\x0b\x32!.google.bigtable.testproxy.SqlRow\"H\n\x11ResultSetMetadata\x12\x33\n\x07\x63olumns\x18\x01 \x03(\x0b\x32\".google.bigtable.v2.ColumnMetadata\"3\n\x06SqlRow\x12)\n\x06values\x18\x01 \x03(\x0b\x32\x19.google.bigtable.v2.Value*d\n\x15OptionalFeatureConfig\x12#\n\x1fOPTIONAL_FEATURE_CONFIG_DEFAULT\x10\x00\x12&\n\"OPTIONAL_FEATURE_CONFIG_ENABLE_ALL\x10\x01\x32\x95\n\n\x18\x43loudBigtableV2TestProxy\x12q\n\x0c\x43reateClient\x12..google.bigtable.testproxy.CreateClientRequest\x1a/.google.bigtable.testproxy.CreateClientResponse\"\x00\x12n\n\x0b\x43loseClient\x12-.google.bigtable.testproxy.CloseClientRequest\x1a..google.bigtable.testproxy.CloseClientResponse\"\x00\x12q\n\x0cRemoveClient\x12..google.bigtable.testproxy.RemoveClientRequest\x1a/.google.bigtable.testproxy.RemoveClientResponse\"\x00\x12\\\n\x07ReadRow\x12).google.bigtable.testproxy.ReadRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x12_\n\x08ReadRows\x12*.google.bigtable.testproxy.ReadRowsRequest\x1a%.google.bigtable.testproxy.RowsResult\"\x00\x12\x66\n\tMutateRow\x12+.google.bigtable.testproxy.MutateRowRequest\x1a*.google.bigtable.testproxy.MutateRowResult\"\x00\x12m\n\x0e\x42ulkMutateRows\x12,.google.bigtable.testproxy.MutateRowsRequest\x1a+.google.bigtable.testproxy.MutateRowsResult\"\x00\x12~\n\x11\x43heckAndMutateRow\x12\x33.google.bigtable.testproxy.CheckAndMutateRowRequest\x1a\x32.google.bigtable.testproxy.CheckAndMutateRowResult\"\x00\x12r\n\rSampleRowKeys\x12/.google.bigtable.testproxy.SampleRowKeysRequest\x1a..google.bigtable.testproxy.SampleRowKeysResult\"\x00\x12r\n\x12ReadModifyWriteRow\x12\x34.google.bigtable.testproxy.ReadModifyWriteRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x12o\n\x0c\x45xecuteQuery\x12..google.bigtable.testproxy.ExecuteQueryRequest\x1a-.google.bigtable.testproxy.ExecuteQueryResult\"\x00\x1a\x34\xca\x41\x31\x62igtable-test-proxy-not-accessible.googleapis.comBg\n#com.google.cloud.bigtable.testproxyP\x01Z>cloud.google.com/go/bigtable/testproxy/testproxypb;testproxypbb\x06proto3')
- DESCRIPTOR._options = None
- DESCRIPTOR._serialized_options = b'\n#com.google.cloud.bigtable.testproxyP\001Z\r./testproxypb'
- _CLOUDBIGTABLEV2TESTPROXY._options = None
- _CLOUDBIGTABLEV2TESTPROXY._serialized_options = b'\312A1bigtable-test-proxy-not-accessible.googleapis.com'
- _CREATECLIENTREQUEST._serialized_start=196
- _CREATECLIENTREQUEST._serialized_end=380
- _CREATECLIENTRESPONSE._serialized_start=382
- _CREATECLIENTRESPONSE._serialized_end=404
- _CLOSECLIENTREQUEST._serialized_start=406
- _CLOSECLIENTREQUEST._serialized_end=445
- _CLOSECLIENTRESPONSE._serialized_start=447
- _CLOSECLIENTRESPONSE._serialized_end=468
- _REMOVECLIENTREQUEST._serialized_start=470
- _REMOVECLIENTREQUEST._serialized_end=510
- _REMOVECLIENTRESPONSE._serialized_start=512
- _REMOVECLIENTRESPONSE._serialized_end=534
- _READROWREQUEST._serialized_start=536
- _READROWREQUEST._serialized_end=655
- _ROWRESULT._serialized_start=657
- _ROWRESULT._serialized_end=742
- _READROWSREQUEST._serialized_start=744
- _READROWSREQUEST._serialized_end=861
- _ROWSRESULT._serialized_start=863
- _ROWSRESULT._serialized_end=949
- _MUTATEROWREQUEST._serialized_start=951
- _MUTATEROWREQUEST._serialized_end=1043
- _MUTATEROWRESULT._serialized_start=1045
- _MUTATEROWRESULT._serialized_end=1098
- _MUTATEROWSREQUEST._serialized_start=1100
- _MUTATEROWSREQUEST._serialized_end=1194
- _MUTATEROWSRESULT._serialized_start=1196
- _MUTATEROWSRESULT._serialized_end=1311
- _CHECKANDMUTATEROWREQUEST._serialized_start=1313
- _CHECKANDMUTATEROWREQUEST._serialized_end=1421
- _CHECKANDMUTATEROWRESULT._serialized_start=1423
- _CHECKANDMUTATEROWRESULT._serialized_end=1547
- _SAMPLEROWKEYSREQUEST._serialized_start=1549
- _SAMPLEROWKEYSREQUEST._serialized_end=1649
- _SAMPLEROWKEYSRESULT._serialized_start=1651
- _SAMPLEROWKEYSRESULT._serialized_end=1767
- _READMODIFYWRITEROWREQUEST._serialized_start=1769
- _READMODIFYWRITEROWREQUEST._serialized_end=1879
- _CLOUDBIGTABLEV2TESTPROXY._serialized_start=1882
- _CLOUDBIGTABLEV2TESTPROXY._serialized_end=3070
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'test_proxy_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'\n#com.google.cloud.bigtable.testproxyP\001Z>cloud.google.com/go/bigtable/testproxy/testproxypb;testproxypb'
+ _globals['_CLOUDBIGTABLEV2TESTPROXY']._loaded_options = None
+ _globals['_CLOUDBIGTABLEV2TESTPROXY']._serialized_options = b'\312A1bigtable-test-proxy-not-accessible.googleapis.com'
+ _globals['_OPTIONALFEATURECONFIG']._serialized_start=2574
+ _globals['_OPTIONALFEATURECONFIG']._serialized_end=2674
+ _globals['_CREATECLIENTREQUEST']._serialized_start=196
+ _globals['_CREATECLIENTREQUEST']._serialized_end=670
+ _globals['_CREATECLIENTREQUEST_SECURITYOPTIONS']._serialized_start=555
+ _globals['_CREATECLIENTREQUEST_SECURITYOPTIONS']._serialized_end=670
+ _globals['_CREATECLIENTRESPONSE']._serialized_start=672
+ _globals['_CREATECLIENTRESPONSE']._serialized_end=694
+ _globals['_CLOSECLIENTREQUEST']._serialized_start=696
+ _globals['_CLOSECLIENTREQUEST']._serialized_end=735
+ _globals['_CLOSECLIENTRESPONSE']._serialized_start=737
+ _globals['_CLOSECLIENTRESPONSE']._serialized_end=758
+ _globals['_REMOVECLIENTREQUEST']._serialized_start=760
+ _globals['_REMOVECLIENTREQUEST']._serialized_end=800
+ _globals['_REMOVECLIENTRESPONSE']._serialized_start=802
+ _globals['_REMOVECLIENTRESPONSE']._serialized_end=824
+ _globals['_READROWREQUEST']._serialized_start=826
+ _globals['_READROWREQUEST']._serialized_end=945
+ _globals['_ROWRESULT']._serialized_start=947
+ _globals['_ROWRESULT']._serialized_end=1032
+ _globals['_READROWSREQUEST']._serialized_start=1034
+ _globals['_READROWSREQUEST']._serialized_end=1151
+ _globals['_ROWSRESULT']._serialized_start=1153
+ _globals['_ROWSRESULT']._serialized_end=1240
+ _globals['_MUTATEROWREQUEST']._serialized_start=1242
+ _globals['_MUTATEROWREQUEST']._serialized_end=1334
+ _globals['_MUTATEROWRESULT']._serialized_start=1336
+ _globals['_MUTATEROWRESULT']._serialized_end=1389
+ _globals['_MUTATEROWSREQUEST']._serialized_start=1391
+ _globals['_MUTATEROWSREQUEST']._serialized_end=1485
+ _globals['_MUTATEROWSRESULT']._serialized_start=1487
+ _globals['_MUTATEROWSRESULT']._serialized_end=1604
+ _globals['_CHECKANDMUTATEROWREQUEST']._serialized_start=1606
+ _globals['_CHECKANDMUTATEROWREQUEST']._serialized_end=1714
+ _globals['_CHECKANDMUTATEROWRESULT']._serialized_start=1716
+ _globals['_CHECKANDMUTATEROWRESULT']._serialized_end=1840
+ _globals['_SAMPLEROWKEYSREQUEST']._serialized_start=1842
+ _globals['_SAMPLEROWKEYSREQUEST']._serialized_end=1942
+ _globals['_SAMPLEROWKEYSRESULT']._serialized_start=1944
+ _globals['_SAMPLEROWKEYSRESULT']._serialized_end=2061
+ _globals['_READMODIFYWRITEROWREQUEST']._serialized_start=2063
+ _globals['_READMODIFYWRITEROWREQUEST']._serialized_end=2173
+ _globals['_EXECUTEQUERYREQUEST']._serialized_start=2175
+ _globals['_EXECUTEQUERYREQUEST']._serialized_end=2273
+ _globals['_EXECUTEQUERYRESULT']._serialized_start=2276
+ _globals['_EXECUTEQUERYRESULT']._serialized_end=2445
+ _globals['_RESULTSETMETADATA']._serialized_start=2447
+ _globals['_RESULTSETMETADATA']._serialized_end=2519
+ _globals['_SQLROW']._serialized_start=2521
+ _globals['_SQLROW']._serialized_end=2572
+ _globals['_CLOUDBIGTABLEV2TESTPROXY']._serialized_start=2677
+ _globals['_CLOUDBIGTABLEV2TESTPROXY']._serialized_end=3978
# @@protoc_insertion_point(module_scope)
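
The regenerated descriptor above also extends CreateClientRequest with an optional_feature_config enum and a nested SecurityOptions message. A population sketch with placeholder values, assuming the module imports flat as test_proxy_pb2:

    import test_proxy_pb2

    request = test_proxy_pb2.CreateClientRequest(
        client_id='client-1',
        data_target='localhost:8086',
        project_id='my-project',
        instance_id='my-instance',
        optional_feature_config=(
            test_proxy_pb2.OPTIONAL_FEATURE_CONFIG_ENABLE_ALL),
        security_options=test_proxy_pb2.CreateClientRequest.SecurityOptions(
            use_ssl=True,
            ssl_endpoint_override='localhost:1234'),
    )
    print(request)
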
diff --git a/test_proxy/protos/test_proxy_pb2_grpc.py b/test_proxy/protos/test_proxy_pb2_grpc.py
index 60214a584..b9d11034e 100644
--- a/test_proxy/protos/test_proxy_pb2_grpc.py
+++ b/test_proxy/protos/test_proxy_pb2_grpc.py
@@ -1,9 +1,29 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
+import warnings
import test_proxy_pb2 as test__proxy__pb2
+GRPC_GENERATED_VERSION = '1.70.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in test_proxy_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
+
class CloudBigtableV2TestProxyStub(object):
"""Note that all RPCs are unary, even when the equivalent client binding call
@@ -34,52 +54,57 @@ def __init__(self, channel):
'/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient',
request_serializer=test__proxy__pb2.CreateClientRequest.SerializeToString,
response_deserializer=test__proxy__pb2.CreateClientResponse.FromString,
- )
+ _registered_method=True)
self.CloseClient = channel.unary_unary(
'/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient',
request_serializer=test__proxy__pb2.CloseClientRequest.SerializeToString,
response_deserializer=test__proxy__pb2.CloseClientResponse.FromString,
- )
+ _registered_method=True)
self.RemoveClient = channel.unary_unary(
'/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient',
request_serializer=test__proxy__pb2.RemoveClientRequest.SerializeToString,
response_deserializer=test__proxy__pb2.RemoveClientResponse.FromString,
- )
+ _registered_method=True)
self.ReadRow = channel.unary_unary(
'/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow',
request_serializer=test__proxy__pb2.ReadRowRequest.SerializeToString,
response_deserializer=test__proxy__pb2.RowResult.FromString,
- )
+ _registered_method=True)
self.ReadRows = channel.unary_unary(
'/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows',
request_serializer=test__proxy__pb2.ReadRowsRequest.SerializeToString,
response_deserializer=test__proxy__pb2.RowsResult.FromString,
- )
+ _registered_method=True)
self.MutateRow = channel.unary_unary(
'/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow',
request_serializer=test__proxy__pb2.MutateRowRequest.SerializeToString,
response_deserializer=test__proxy__pb2.MutateRowResult.FromString,
- )
+ _registered_method=True)
self.BulkMutateRows = channel.unary_unary(
'/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows',
request_serializer=test__proxy__pb2.MutateRowsRequest.SerializeToString,
response_deserializer=test__proxy__pb2.MutateRowsResult.FromString,
- )
+ _registered_method=True)
self.CheckAndMutateRow = channel.unary_unary(
'/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow',
request_serializer=test__proxy__pb2.CheckAndMutateRowRequest.SerializeToString,
response_deserializer=test__proxy__pb2.CheckAndMutateRowResult.FromString,
- )
+ _registered_method=True)
self.SampleRowKeys = channel.unary_unary(
'/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys',
request_serializer=test__proxy__pb2.SampleRowKeysRequest.SerializeToString,
response_deserializer=test__proxy__pb2.SampleRowKeysResult.FromString,
- )
+ _registered_method=True)
self.ReadModifyWriteRow = channel.unary_unary(
'/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow',
request_serializer=test__proxy__pb2.ReadModifyWriteRowRequest.SerializeToString,
response_deserializer=test__proxy__pb2.RowResult.FromString,
- )
+ _registered_method=True)
+ self.ExecuteQuery = channel.unary_unary(
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ExecuteQuery',
+ request_serializer=test__proxy__pb2.ExecuteQueryRequest.SerializeToString,
+ response_deserializer=test__proxy__pb2.ExecuteQueryResult.FromString,
+ _registered_method=True)
class CloudBigtableV2TestProxyServicer(object):
@@ -183,6 +208,13 @@ def ReadModifyWriteRow(self, request, context):
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
+ def ExecuteQuery(self, request, context):
+ """Executes a BTQL query with the client.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
def add_CloudBigtableV2TestProxyServicer_to_server(servicer, server):
rpc_method_handlers = {
@@ -236,10 +268,16 @@ def add_CloudBigtableV2TestProxyServicer_to_server(servicer, server):
request_deserializer=test__proxy__pb2.ReadModifyWriteRowRequest.FromString,
response_serializer=test__proxy__pb2.RowResult.SerializeToString,
),
+ 'ExecuteQuery': grpc.unary_unary_rpc_method_handler(
+ servicer.ExecuteQuery,
+ request_deserializer=test__proxy__pb2.ExecuteQueryRequest.FromString,
+ response_serializer=test__proxy__pb2.ExecuteQueryResult.SerializeToString,
+ ),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.bigtable.testproxy.CloudBigtableV2TestProxy', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
+ server.add_registered_method_handlers('google.bigtable.testproxy.CloudBigtableV2TestProxy', rpc_method_handlers)
# This class is part of an EXPERIMENTAL API.
@@ -273,11 +311,21 @@ def CreateClient(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient',
test__proxy__pb2.CreateClientRequest.SerializeToString,
test__proxy__pb2.CreateClientResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def CloseClient(request,
@@ -290,11 +338,21 @@ def CloseClient(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient',
test__proxy__pb2.CloseClientRequest.SerializeToString,
test__proxy__pb2.CloseClientResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def RemoveClient(request,
@@ -307,11 +365,21 @@ def RemoveClient(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient',
test__proxy__pb2.RemoveClientRequest.SerializeToString,
test__proxy__pb2.RemoveClientResponse.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def ReadRow(request,
@@ -324,11 +392,21 @@ def ReadRow(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow',
test__proxy__pb2.ReadRowRequest.SerializeToString,
test__proxy__pb2.RowResult.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def ReadRows(request,
@@ -341,11 +419,21 @@ def ReadRows(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows',
test__proxy__pb2.ReadRowsRequest.SerializeToString,
test__proxy__pb2.RowsResult.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def MutateRow(request,
@@ -358,11 +446,21 @@ def MutateRow(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow',
test__proxy__pb2.MutateRowRequest.SerializeToString,
test__proxy__pb2.MutateRowResult.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def BulkMutateRows(request,
@@ -375,11 +473,21 @@ def BulkMutateRows(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows',
test__proxy__pb2.MutateRowsRequest.SerializeToString,
test__proxy__pb2.MutateRowsResult.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def CheckAndMutateRow(request,
@@ -392,11 +500,21 @@ def CheckAndMutateRow(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow',
test__proxy__pb2.CheckAndMutateRowRequest.SerializeToString,
test__proxy__pb2.CheckAndMutateRowResult.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def SampleRowKeys(request,
@@ -409,11 +527,21 @@ def SampleRowKeys(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys',
test__proxy__pb2.SampleRowKeysRequest.SerializeToString,
test__proxy__pb2.SampleRowKeysResult.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
@staticmethod
def ReadModifyWriteRow(request,
@@ -426,8 +554,45 @@ def ReadModifyWriteRow(request,
wait_for_ready=None,
timeout=None,
metadata=None):
- return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow',
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow',
test__proxy__pb2.ReadModifyWriteRowRequest.SerializeToString,
test__proxy__pb2.RowResult.FromString,
- options, channel_credentials,
- insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
+
+ @staticmethod
+ def ExecuteQuery(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ insecure=False,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(
+ request,
+ target,
+ '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ExecuteQuery',
+ test__proxy__pb2.ExecuteQueryRequest.SerializeToString,
+ test__proxy__pb2.ExecuteQueryResult.FromString,
+ options,
+ channel_credentials,
+ insecure,
+ call_credentials,
+ compression,
+ wait_for_ready,
+ timeout,
+ metadata,
+ _registered_method=True)
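
Taken together, the regenerated stub is used like any other gRPC client, with the new ExecuteQuery RPC available alongside the existing methods. A hedged end-to-end sketch; the proxy address is a placeholder and a test proxy must actually be listening there:

    import grpc
    import test_proxy_pb2
    import test_proxy_pb2_grpc

    def run(target: str = 'localhost:9999') -> None:
        with grpc.insecure_channel(target) as channel:
            stub = test_proxy_pb2_grpc.CloudBigtableV2TestProxyStub(channel)
            # Register a data client with the proxy, then query through it.
            stub.CreateClient(test_proxy_pb2.CreateClientRequest(
                client_id='client-1',
                data_target='bigtable.googleapis.com:443',
                project_id='my-project',
                instance_id='my-instance'))
            result = stub.ExecuteQuery(
                test_proxy_pb2.ExecuteQueryRequest(client_id='client-1'))
            print(result.status)

    if __name__ == '__main__':
        run()
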
diff --git a/test_proxy/protos/types_pb2.py b/test_proxy/protos/types_pb2.py
new file mode 100644
index 000000000..7acdbf7f1
--- /dev/null
+++ b/test_proxy/protos/types_pb2.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: google/bigtable/v2/types.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC,
+ 5,
+ 29,
+ 0,
+ '',
+ 'google/bigtable/v2/types.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/bigtable/v2/types.proto\x12\x12google.bigtable.v2\x1a\x1fgoogle/api/field_behavior.proto\"\xe0\x10\n\x04Type\x12\x34\n\nbytes_type\x18\x01 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.BytesH\x00\x12\x36\n\x0bstring_type\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.v2.Type.StringH\x00\x12\x34\n\nint64_type\x18\x05 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.Int64H\x00\x12\x38\n\x0c\x66loat32_type\x18\x0c \x01(\x0b\x32 .google.bigtable.v2.Type.Float32H\x00\x12\x38\n\x0c\x66loat64_type\x18\t \x01(\x0b\x32 .google.bigtable.v2.Type.Float64H\x00\x12\x32\n\tbool_type\x18\x08 \x01(\x0b\x32\x1d.google.bigtable.v2.Type.BoolH\x00\x12<\n\x0etimestamp_type\x18\n \x01(\x0b\x32\".google.bigtable.v2.Type.TimestampH\x00\x12\x32\n\tdate_type\x18\x0b \x01(\x0b\x32\x1d.google.bigtable.v2.Type.DateH\x00\x12<\n\x0e\x61ggregate_type\x18\x06 \x01(\x0b\x32\".google.bigtable.v2.Type.AggregateH\x00\x12\x36\n\x0bstruct_type\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.Type.StructH\x00\x12\x34\n\narray_type\x18\x03 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.ArrayH\x00\x12\x30\n\x08map_type\x18\x04 \x01(\x0b\x32\x1c.google.bigtable.v2.Type.MapH\x00\x1a\x9d\x01\n\x05\x42ytes\x12\x39\n\x08\x65ncoding\x18\x01 \x01(\x0b\x32\'.google.bigtable.v2.Type.Bytes.Encoding\x1aY\n\x08\x45ncoding\x12:\n\x03raw\x18\x01 \x01(\x0b\x32+.google.bigtable.v2.Type.Bytes.Encoding.RawH\x00\x1a\x05\n\x03RawB\n\n\x08\x65ncoding\x1a\x8d\x02\n\x06String\x12:\n\x08\x65ncoding\x18\x01 \x01(\x0b\x32(.google.bigtable.v2.Type.String.Encoding\x1a\xc6\x01\n\x08\x45ncoding\x12H\n\x08utf8_raw\x18\x01 \x01(\x0b\x32\x30.google.bigtable.v2.Type.String.Encoding.Utf8RawB\x02\x18\x01H\x00\x12H\n\nutf8_bytes\x18\x02 \x01(\x0b\x32\x32.google.bigtable.v2.Type.String.Encoding.Utf8BytesH\x00\x1a\r\n\x07Utf8Raw:\x02\x18\x01\x1a\x0b\n\tUtf8BytesB\n\n\x08\x65ncoding\x1a\xf5\x01\n\x05Int64\x12\x39\n\x08\x65ncoding\x18\x01 \x01(\x0b\x32\'.google.bigtable.v2.Type.Int64.Encoding\x1a\xb0\x01\n\x08\x45ncoding\x12R\n\x10\x62ig_endian_bytes\x18\x01 \x01(\x0b\x32\x36.google.bigtable.v2.Type.Int64.Encoding.BigEndianBytesH\x00\x1a\x44\n\x0e\x42igEndianBytes\x12\x32\n\nbytes_type\x18\x01 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.BytesB\n\n\x08\x65ncoding\x1a\x06\n\x04\x42ool\x1a\t\n\x07\x46loat32\x1a\t\n\x07\x46loat64\x1a\x0b\n\tTimestamp\x1a\x06\n\x04\x44\x61te\x1a\x84\x01\n\x06Struct\x12\x35\n\x06\x66ields\x18\x01 \x03(\x0b\x32%.google.bigtable.v2.Type.Struct.Field\x1a\x43\n\x05\x46ield\x12\x12\n\nfield_name\x18\x01 \x01(\t\x12&\n\x04type\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x1a\x37\n\x05\x41rray\x12.\n\x0c\x65lement_type\x18\x01 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x1a_\n\x03Map\x12*\n\x08key_type\x18\x01 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x12,\n\nvalue_type\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x1a\xb7\x03\n\tAggregate\x12,\n\ninput_type\x18\x01 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x12\x31\n\nstate_type\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.TypeB\x03\xe0\x41\x03\x12\x35\n\x03sum\x18\x04 \x01(\x0b\x32&.google.bigtable.v2.Type.Aggregate.SumH\x00\x12_\n\x12hllpp_unique_count\x18\x05 \x01(\x0b\x32\x41.google.bigtable.v2.Type.Aggregate.HyperLogLogPlusPlusUniqueCountH\x00\x12\x35\n\x03max\x18\x06 \x01(\x0b\x32&.google.bigtable.v2.Type.Aggregate.MaxH\x00\x12\x35\n\x03min\x18\x07 \x01(\x0b\x32&.google.bigtable.v2.Type.Aggregate.MinH\x00\x1a\x05\n\x03Sum\x1a\x05\n\x03Max\x1a\x05\n\x03Min\x1a 
\n\x1eHyperLogLogPlusPlusUniqueCountB\x0c\n\naggregatorB\x06\n\x04kindB\xb4\x01\n\x16\x63om.google.bigtable.v2B\nTypesProtoP\x01Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.types_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals['DESCRIPTOR']._loaded_options = None
+ _globals['DESCRIPTOR']._serialized_options = b'\n\026com.google.bigtable.v2B\nTypesProtoP\001Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2'
+ _globals['_TYPE_STRING_ENCODING_UTF8RAW']._loaded_options = None
+ _globals['_TYPE_STRING_ENCODING_UTF8RAW']._serialized_options = b'\030\001'
+ _globals['_TYPE_STRING_ENCODING'].fields_by_name['utf8_raw']._loaded_options = None
+ _globals['_TYPE_STRING_ENCODING'].fields_by_name['utf8_raw']._serialized_options = b'\030\001'
+ _globals['_TYPE_AGGREGATE'].fields_by_name['state_type']._loaded_options = None
+ _globals['_TYPE_AGGREGATE'].fields_by_name['state_type']._serialized_options = b'\340A\003'
+ _globals['_TYPE']._serialized_start=88
+ _globals['_TYPE']._serialized_end=2232
+ _globals['_TYPE_BYTES']._serialized_start=765
+ _globals['_TYPE_BYTES']._serialized_end=922
+ _globals['_TYPE_BYTES_ENCODING']._serialized_start=833
+ _globals['_TYPE_BYTES_ENCODING']._serialized_end=922
+ _globals['_TYPE_BYTES_ENCODING_RAW']._serialized_start=905
+ _globals['_TYPE_BYTES_ENCODING_RAW']._serialized_end=910
+ _globals['_TYPE_STRING']._serialized_start=925
+ _globals['_TYPE_STRING']._serialized_end=1194
+ _globals['_TYPE_STRING_ENCODING']._serialized_start=996
+ _globals['_TYPE_STRING_ENCODING']._serialized_end=1194
+ _globals['_TYPE_STRING_ENCODING_UTF8RAW']._serialized_start=1156
+ _globals['_TYPE_STRING_ENCODING_UTF8RAW']._serialized_end=1169
+ _globals['_TYPE_STRING_ENCODING_UTF8BYTES']._serialized_start=1171
+ _globals['_TYPE_STRING_ENCODING_UTF8BYTES']._serialized_end=1182
+ _globals['_TYPE_INT64']._serialized_start=1197
+ _globals['_TYPE_INT64']._serialized_end=1442
+ _globals['_TYPE_INT64_ENCODING']._serialized_start=1266
+ _globals['_TYPE_INT64_ENCODING']._serialized_end=1442
+ _globals['_TYPE_INT64_ENCODING_BIGENDIANBYTES']._serialized_start=1362
+ _globals['_TYPE_INT64_ENCODING_BIGENDIANBYTES']._serialized_end=1430
+ _globals['_TYPE_BOOL']._serialized_start=1444
+ _globals['_TYPE_BOOL']._serialized_end=1450
+ _globals['_TYPE_FLOAT32']._serialized_start=1452
+ _globals['_TYPE_FLOAT32']._serialized_end=1461
+ _globals['_TYPE_FLOAT64']._serialized_start=1463
+ _globals['_TYPE_FLOAT64']._serialized_end=1472
+ _globals['_TYPE_TIMESTAMP']._serialized_start=1474
+ _globals['_TYPE_TIMESTAMP']._serialized_end=1485
+ _globals['_TYPE_DATE']._serialized_start=1487
+ _globals['_TYPE_DATE']._serialized_end=1493
+ _globals['_TYPE_STRUCT']._serialized_start=1496
+ _globals['_TYPE_STRUCT']._serialized_end=1628
+ _globals['_TYPE_STRUCT_FIELD']._serialized_start=1561
+ _globals['_TYPE_STRUCT_FIELD']._serialized_end=1628
+ _globals['_TYPE_ARRAY']._serialized_start=1630
+ _globals['_TYPE_ARRAY']._serialized_end=1685
+ _globals['_TYPE_MAP']._serialized_start=1687
+ _globals['_TYPE_MAP']._serialized_end=1782
+ _globals['_TYPE_AGGREGATE']._serialized_start=1785
+ _globals['_TYPE_AGGREGATE']._serialized_end=2224
+ _globals['_TYPE_AGGREGATE_SUM']._serialized_start=2157
+ _globals['_TYPE_AGGREGATE_SUM']._serialized_end=2162
+ _globals['_TYPE_AGGREGATE_MAX']._serialized_start=2164
+ _globals['_TYPE_AGGREGATE_MAX']._serialized_end=2169
+ _globals['_TYPE_AGGREGATE_MIN']._serialized_start=2171
+ _globals['_TYPE_AGGREGATE_MIN']._serialized_end=2176
+ _globals['_TYPE_AGGREGATE_HYPERLOGLOGPLUSPLUSUNIQUECOUNT']._serialized_start=2178
+ _globals['_TYPE_AGGREGATE_HYPERLOGLOGPLUSPLUSUNIQUECOUNT']._serialized_end=2210
+# @@protoc_insertion_point(module_scope)
diff --git a/test_proxy/protos/types_pb2_grpc.py b/test_proxy/protos/types_pb2_grpc.py
new file mode 100644
index 000000000..29956dd38
--- /dev/null
+++ b/test_proxy/protos/types_pb2_grpc.py
@@ -0,0 +1,24 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+
+GRPC_GENERATED_VERSION = '1.70.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+ from grpc._utilities import first_version_is_lower
+ _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+ _version_not_supported = True
+
+if _version_not_supported:
+ raise RuntimeError(
+ f'The grpc package installed is at version {GRPC_VERSION},'
+ + f' but the generated code in google/bigtable/v2/types_pb2_grpc.py depends on'
+ + f' grpcio>={GRPC_GENERATED_VERSION}.'
+ + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ )
diff --git a/testing/constraints-3.14.txt b/testing/constraints-3.14.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt
index 5a3f3e3fc..023133380 100644
--- a/testing/constraints-3.7.txt
+++ b/testing/constraints-3.7.txt
@@ -5,8 +5,8 @@
#
# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev",
# Then this file should have foo==1.14.0
-google-api-core==2.16.0
-google-auth==2.14.1
+google-api-core==2.17.0
+google-auth==2.23.0
google-cloud-core==2.0.0
grpc-google-iam-v1==0.12.4
proto-plus==1.22.3
diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt
index 5ed0c2fb9..a7e4616c9 100644
--- a/testing/constraints-3.8.txt
+++ b/testing/constraints-3.8.txt
@@ -5,8 +5,8 @@
#
# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev",
# Then this file should have foo==1.14.0
-google-api-core==2.16.0
-google-auth==2.14.1
+google-api-core==2.17.0
+google-auth==2.23.0
google-cloud-core==2.0.0
grpc-google-iam-v1==0.12.4
proto-plus==1.22.3
diff --git a/tests/system/admin_overlay/__init__.py b/tests/system/admin_overlay/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/system/admin_overlay/conftest.py b/tests/system/admin_overlay/conftest.py
new file mode 100644
index 000000000..66baef3f4
--- /dev/null
+++ b/tests/system/admin_overlay/conftest.py
@@ -0,0 +1,38 @@
+import google.auth
+
+import os
+import pytest
+import uuid
+
+
+INSTANCE_PREFIX = "admin-overlay-instance"
+BACKUP_PREFIX = "admin-overlay-backup"
+ROW_PREFIX = "test-row"
+
+DEFAULT_CLUSTER_LOCATIONS = ["us-east1-b"]
+REPLICATION_CLUSTER_LOCATIONS = ["us-east1-b", "us-west1-b"]
+TEST_TABLE_NAME = "system-test-table"
+TEST_BACKUP_TABLE_NAME = "system-test-backup-table"
+TEST_COLUMMN_FAMILY_NAME = "test-column"
+TEST_COLUMN_NAME = "value"
+NUM_ROWS = 500
+INITIAL_CELL_VALUE = "Hello"
+NEW_CELL_VALUE = "World"
+
+
+@pytest.fixture(scope="session")
+def admin_overlay_project_id():
+ project_id = os.getenv("GOOGLE_CLOUD_PROJECT")
+ if not project_id:
+ _, project_id = google.auth.default()
+ return project_id
+
+
+def generate_unique_suffix(name):
+ """
+    Appends a unique suffix to the given name and returns the result.
+
+ Uses UUID4 because using time.time doesn't guarantee
+ uniqueness when the time is frozen in containers.
+ """
+ return f"{name}-{uuid.uuid4().hex[:7]}"
diff --git a/tests/system/admin_overlay/test_system_async.py b/tests/system/admin_overlay/test_system_async.py
new file mode 100644
index 000000000..aa412569e
--- /dev/null
+++ b/tests/system/admin_overlay/test_system_async.py
@@ -0,0 +1,395 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Tuple
+
+from google.cloud import bigtable_admin_v2 as admin_v2
+from google.cloud.bigtable.data._cross_sync import CrossSync
+from google.cloud.bigtable.data import mutations, read_rows_query
+from google.cloud.environment_vars import BIGTABLE_EMULATOR
+
+from .conftest import (
+ INSTANCE_PREFIX,
+ BACKUP_PREFIX,
+ ROW_PREFIX,
+ DEFAULT_CLUSTER_LOCATIONS,
+ REPLICATION_CLUSTER_LOCATIONS,
+ TEST_TABLE_NAME,
+ TEST_BACKUP_TABLE_NAME,
+ TEST_COLUMMN_FAMILY_NAME,
+ TEST_COLUMN_NAME,
+ NUM_ROWS,
+ INITIAL_CELL_VALUE,
+ NEW_CELL_VALUE,
+ generate_unique_suffix,
+)
+
+from datetime import datetime, timedelta
+
+import pytest
+import os
+
+
+if CrossSync.is_async:
+ from google.api_core import operation_async as api_core_operation
+else:
+ from google.api_core import operation as api_core_operation
+
+
+__CROSS_SYNC_OUTPUT__ = "tests.system.admin_overlay.test_system_autogen"
+
+if os.getenv(BIGTABLE_EMULATOR):
+ pytest.skip(
+ allow_module_level=True,
+ reason="Emulator support for admin client tests unsupported.",
+ )
+
+
+@CrossSync.convert
+@CrossSync.pytest_fixture(scope="session")
+async def data_client(admin_overlay_project_id):
+ async with CrossSync.DataClient(project=admin_overlay_project_id) as client:
+ yield client
+
+
+@CrossSync.convert(
+ replace_symbols={"BigtableTableAdminAsyncClient": "BigtableTableAdminClient"}
+)
+@CrossSync.pytest_fixture(scope="session")
+async def table_admin_client(admin_overlay_project_id):
+ async with admin_v2.BigtableTableAdminAsyncClient(
+ client_options={
+ "quota_project_id": admin_overlay_project_id,
+ }
+ ) as client:
+ yield client
+
+
+@CrossSync.convert(
+ replace_symbols={"BigtableInstanceAdminAsyncClient": "BigtableInstanceAdminClient"}
+)
+@CrossSync.pytest_fixture(scope="session")
+async def instance_admin_client(admin_overlay_project_id):
+ async with admin_v2.BigtableInstanceAdminAsyncClient(
+ client_options={
+ "quota_project_id": admin_overlay_project_id,
+ }
+ ) as client:
+ yield client
+
+
+@CrossSync.convert
+@CrossSync.pytest_fixture(scope="session")
+async def instances_to_delete(instance_admin_client):
+ instances = []
+
+ try:
+ yield instances
+ finally:
+ for instance in instances:
+ await instance_admin_client.delete_instance(name=instance.name)
+
+
+@CrossSync.convert
+@CrossSync.pytest_fixture(scope="session")
+async def backups_to_delete(table_admin_client):
+ backups = []
+
+ try:
+ yield backups
+ finally:
+ for backup in backups:
+ await table_admin_client.delete_backup(name=backup.name)
+
+
+@CrossSync.convert
+async def create_instance(
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ project_id,
+ instances_to_delete,
+ storage_type=admin_v2.StorageType.HDD,
+ cluster_locations=DEFAULT_CLUSTER_LOCATIONS,
+) -> Tuple[admin_v2.Instance, admin_v2.Table]:
+ """
+ Creates a new Bigtable instance with the specified project_id, storage type, and cluster locations.
+
+ After creating the Bigtable instance, it will create a test table and populate it with dummy data.
+ This is not defined as a fixture because the different system tests need different kinds of instances.
+ """
+ # Create the instance
+ clusters = {}
+
+ instance_id = generate_unique_suffix(INSTANCE_PREFIX)
+
+ for idx, location in enumerate(cluster_locations):
+ clusters[location] = admin_v2.Cluster(
+ name=instance_admin_client.cluster_path(
+ project_id, instance_id, f"{instance_id}-{idx}"
+ ),
+ location=instance_admin_client.common_location_path(project_id, location),
+ default_storage_type=storage_type,
+ )
+
+ # Instance and cluster creation are currently unsupported in the Bigtable emulator
+ if os.getenv(BIGTABLE_EMULATOR):
+ # All we need for system tests so far is the instance name.
+ instance = admin_v2.Instance(
+ name=instance_admin_client.instance_path(project_id, instance_id),
+ )
+ else:
+ create_instance_request = admin_v2.CreateInstanceRequest(
+ parent=instance_admin_client.common_project_path(project_id),
+ instance_id=instance_id,
+ instance=admin_v2.Instance(
+ display_name=instance_id[
+ :30
+ ], # truncate to 30 characters because of character limit
+ ),
+ clusters=clusters,
+ )
+ operation = await instance_admin_client.create_instance(create_instance_request)
+ instance = await operation.result()
+
+ instances_to_delete.append(instance)
+
+ # Create a table within the instance
+ create_table_request = admin_v2.CreateTableRequest(
+ parent=instance_admin_client.instance_path(project_id, instance_id),
+ table_id=TEST_TABLE_NAME,
+ table=admin_v2.Table(
+ column_families={
+ TEST_COLUMMN_FAMILY_NAME: admin_v2.ColumnFamily(),
+ }
+ ),
+ )
+
+ table = await table_admin_client.create_table(create_table_request)
+
+ # Populate with dummy data
+ await populate_table(
+ table_admin_client, data_client, instance, table, INITIAL_CELL_VALUE
+ )
+
+ return instance, table
+
+
+@CrossSync.convert
+async def populate_table(table_admin_client, data_client, instance, table, cell_value):
+ """
+ Populates all the test cells in the given table with the given cell value.
+
+ This is used to populate test data when creating an instance, and for testing the
+ wait_for_consistency call.
+ """
+ data_client_table = data_client.get_table(
+ table_admin_client.parse_instance_path(instance.name)["instance"],
+ table_admin_client.parse_table_path(table.name)["table"],
+ )
+ row_mutation_entries = []
+ for i in range(0, NUM_ROWS):
+ row_mutation_entries.append(
+ mutations.RowMutationEntry(
+ row_key=f"{ROW_PREFIX}-{i}",
+ mutations=[
+ mutations.SetCell(
+ family=TEST_COLUMMN_FAMILY_NAME,
+ qualifier=TEST_COLUMN_NAME,
+ new_value=cell_value,
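+                        # -1 requests a server-assigned timestamp for this cell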
+ timestamp_micros=-1,
+ )
+ ],
+ )
+ )
+
+ await data_client_table.bulk_mutate_rows(row_mutation_entries)
+
+
+@CrossSync.convert
+async def create_backup(
+ instance_admin_client, table_admin_client, instance, table, backups_to_delete
+) -> admin_v2.Backup:
+ """
+ Creates a backup of the given table under the given instance.
+
+ This will be restored to a different instance later on, to test
+ optimize_restored_table.
+ """
+ # Get a cluster in the instance for the backup
+ list_clusters_response = await instance_admin_client.list_clusters(
+ parent=instance.name
+ )
+ cluster_name = list_clusters_response.clusters[0].name
+
+ backup_id = generate_unique_suffix(BACKUP_PREFIX)
+
+ # Create the backup
+ operation = await table_admin_client.create_backup(
+ admin_v2.CreateBackupRequest(
+ parent=cluster_name,
+ backup_id=backup_id,
+ backup=admin_v2.Backup(
+ name=f"{cluster_name}/backups/{backup_id}",
+ source_table=table.name,
+ expire_time=datetime.now() + timedelta(hours=7),
+ ),
+ )
+ )
+
+ backup = await operation.result()
+ backups_to_delete.append(backup)
+ return backup
+
+
+@CrossSync.convert
+async def assert_table_cell_value_equal_to(
+ table_admin_client, data_client, instance, table, value
+):
+ """
+ Asserts that all cells in the given table have the given value.
+ """
+ data_client_table = data_client.get_table(
+ table_admin_client.parse_instance_path(instance.name)["instance"],
+ table_admin_client.parse_table_path(table.name)["table"],
+ )
+
+ # Read all the rows; there shouldn't be that many of them
+ query = read_rows_query.ReadRowsQuery(limit=NUM_ROWS)
+ async for row in await data_client_table.read_rows_stream(query):
+ latest_cell = row[TEST_COLUMMN_FAMILY_NAME, TEST_COLUMN_NAME][0]
+ assert latest_cell.value.decode("utf-8") == value
+
+
+@CrossSync.convert(
+ replace_symbols={
+ "AsyncRestoreTableOperation": "RestoreTableOperation",
+ "AsyncOperation": "Operation",
+ }
+)
+@CrossSync.pytest
+@pytest.mark.skipif(
+ os.getenv(BIGTABLE_EMULATOR),
+ reason="Backups are not supported in the Bigtable emulator",
+)
+@pytest.mark.parametrize(
+ "second_instance_storage_type,expect_optimize_operation",
+ [
+ (admin_v2.StorageType.HDD, False),
+ (admin_v2.StorageType.SSD, True),
+ ],
+)
+async def test_optimize_restored_table(
+ admin_overlay_project_id,
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ instances_to_delete,
+ backups_to_delete,
+ second_instance_storage_type,
+ expect_optimize_operation,
+):
+    # Create two instances. We back up a table from the first instance to a new table in the
+    # second instance, to test whether different storage types on the restore target trigger
+    # an optimize_restored_table operation.
+ instance_with_backup, table_to_backup = await create_instance(
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ admin_overlay_project_id,
+ instances_to_delete,
+ admin_v2.StorageType.HDD,
+ )
+
+ instance_to_restore, _ = await create_instance(
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ admin_overlay_project_id,
+ instances_to_delete,
+ second_instance_storage_type,
+ )
+
+ backup = await create_backup(
+ instance_admin_client,
+ table_admin_client,
+ instance_with_backup,
+ table_to_backup,
+ backups_to_delete,
+ )
+
+ # Restore to other instance
+ restore_operation = await table_admin_client.restore_table(
+ admin_v2.RestoreTableRequest(
+ parent=instance_to_restore.name,
+ table_id=TEST_BACKUP_TABLE_NAME,
+ backup=backup.name,
+ )
+ )
+
+ assert isinstance(restore_operation, admin_v2.AsyncRestoreTableOperation)
+ restored_table = await restore_operation.result()
+
+ optimize_operation = await restore_operation.optimize_restored_table_operation()
+ if expect_optimize_operation:
+ assert isinstance(optimize_operation, api_core_operation.AsyncOperation)
+ await optimize_operation.result()
+ else:
+ assert optimize_operation is None
+
+ # Test that the new table exists
+ assert (
+ restored_table.name
+ == f"{instance_to_restore.name}/tables/{TEST_BACKUP_TABLE_NAME}"
+ )
+ await assert_table_cell_value_equal_to(
+ table_admin_client,
+ data_client,
+ instance_to_restore,
+ restored_table,
+ INITIAL_CELL_VALUE,
+ )
+
+
+@CrossSync.pytest
+async def test_wait_for_consistency(
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ instances_to_delete,
+ admin_overlay_project_id,
+):
+    # Create an instance and a table (populated with INITIAL_CELL_VALUE), then
+    # overwrite every row with NEW_CELL_VALUE.
+ instance, table = await create_instance(
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ admin_overlay_project_id,
+ instances_to_delete,
+ cluster_locations=REPLICATION_CLUSTER_LOCATIONS,
+ )
+
+ await populate_table(
+ table_admin_client, data_client, instance, table, NEW_CELL_VALUE
+ )
+
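+    # wait_for_consistency generates a consistency token and polls check_consistency
+    # until the writes above have replicated to every cluster.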
+ wait_for_consistency_request = admin_v2.WaitForConsistencyRequest(
+ name=table.name,
+ standard_read_remote_writes=admin_v2.StandardReadRemoteWrites(),
+ )
+ await table_admin_client.wait_for_consistency(wait_for_consistency_request)
+ await assert_table_cell_value_equal_to(
+ table_admin_client, data_client, instance, table, NEW_CELL_VALUE
+ )
diff --git a/tests/system/admin_overlay/test_system_autogen.py b/tests/system/admin_overlay/test_system_autogen.py
new file mode 100644
index 000000000..4fde3571f
--- /dev/null
+++ b/tests/system/admin_overlay/test_system_autogen.py
@@ -0,0 +1,300 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from typing import Tuple
+from google.cloud import bigtable_admin_v2 as admin_v2
+from google.cloud.bigtable.data._cross_sync import CrossSync
+from google.cloud.bigtable.data import mutations, read_rows_query
+from google.cloud.environment_vars import BIGTABLE_EMULATOR
+from .conftest import (
+ INSTANCE_PREFIX,
+ BACKUP_PREFIX,
+ ROW_PREFIX,
+ DEFAULT_CLUSTER_LOCATIONS,
+ REPLICATION_CLUSTER_LOCATIONS,
+ TEST_TABLE_NAME,
+ TEST_BACKUP_TABLE_NAME,
+ TEST_COLUMMN_FAMILY_NAME,
+ TEST_COLUMN_NAME,
+ NUM_ROWS,
+ INITIAL_CELL_VALUE,
+ NEW_CELL_VALUE,
+ generate_unique_suffix,
+)
+from datetime import datetime, timedelta
+import pytest
+import os
+from google.api_core import operation as api_core_operation
+
+if os.getenv(BIGTABLE_EMULATOR):
+ pytest.skip(
+ allow_module_level=True,
+ reason="Emulator support for admin client tests unsupported.",
+ )
+
+
+@pytest.fixture(scope="session")
+def data_client(admin_overlay_project_id):
+ with CrossSync._Sync_Impl.DataClient(project=admin_overlay_project_id) as client:
+ yield client
+
+
+@pytest.fixture(scope="session")
+def table_admin_client(admin_overlay_project_id):
+ with admin_v2.BigtableTableAdminClient(
+ client_options={"quota_project_id": admin_overlay_project_id}
+ ) as client:
+ yield client
+
+
+@pytest.fixture(scope="session")
+def instance_admin_client(admin_overlay_project_id):
+ with admin_v2.BigtableInstanceAdminClient(
+ client_options={"quota_project_id": admin_overlay_project_id}
+ ) as client:
+ yield client
+
+
+@pytest.fixture(scope="session")
+def instances_to_delete(instance_admin_client):
+ instances = []
+ try:
+ yield instances
+ finally:
+ for instance in instances:
+ instance_admin_client.delete_instance(name=instance.name)
+
+
+@pytest.fixture(scope="session")
+def backups_to_delete(table_admin_client):
+ backups = []
+ try:
+ yield backups
+ finally:
+ for backup in backups:
+ table_admin_client.delete_backup(name=backup.name)
+
+
+def create_instance(
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ project_id,
+ instances_to_delete,
+ storage_type=admin_v2.StorageType.HDD,
+ cluster_locations=DEFAULT_CLUSTER_LOCATIONS,
+) -> Tuple[admin_v2.Instance, admin_v2.Table]:
+ """Creates a new Bigtable instance with the specified project_id, storage type, and cluster locations.
+
+ After creating the Bigtable instance, it will create a test table and populate it with dummy data.
+ This is not defined as a fixture because the different system tests need different kinds of instances.
+ """
+ clusters = {}
+ instance_id = generate_unique_suffix(INSTANCE_PREFIX)
+ for idx, location in enumerate(cluster_locations):
+ clusters[location] = admin_v2.Cluster(
+ name=instance_admin_client.cluster_path(
+ project_id, instance_id, f"{instance_id}-{idx}"
+ ),
+ location=instance_admin_client.common_location_path(project_id, location),
+ default_storage_type=storage_type,
+ )
+ if os.getenv(BIGTABLE_EMULATOR):
+ instance = admin_v2.Instance(
+ name=instance_admin_client.instance_path(project_id, instance_id)
+ )
+ else:
+ create_instance_request = admin_v2.CreateInstanceRequest(
+ parent=instance_admin_client.common_project_path(project_id),
+ instance_id=instance_id,
+ instance=admin_v2.Instance(display_name=instance_id[:30]),
+ clusters=clusters,
+ )
+ operation = instance_admin_client.create_instance(create_instance_request)
+ instance = operation.result()
+ instances_to_delete.append(instance)
+ create_table_request = admin_v2.CreateTableRequest(
+ parent=instance_admin_client.instance_path(project_id, instance_id),
+ table_id=TEST_TABLE_NAME,
+ table=admin_v2.Table(
+ column_families={TEST_COLUMMN_FAMILY_NAME: admin_v2.ColumnFamily()}
+ ),
+ )
+ table = table_admin_client.create_table(create_table_request)
+ populate_table(table_admin_client, data_client, instance, table, INITIAL_CELL_VALUE)
+ return (instance, table)
+
+
+def populate_table(table_admin_client, data_client, instance, table, cell_value):
+ """Populates all the test cells in the given table with the given cell value.
+
+ This is used to populate test data when creating an instance, and for testing the
+ wait_for_consistency call."""
+ data_client_table = data_client.get_table(
+ table_admin_client.parse_instance_path(instance.name)["instance"],
+ table_admin_client.parse_table_path(table.name)["table"],
+ )
+ row_mutation_entries = []
+ for i in range(0, NUM_ROWS):
+ row_mutation_entries.append(
+ mutations.RowMutationEntry(
+ row_key=f"{ROW_PREFIX}-{i}",
+ mutations=[
+ mutations.SetCell(
+ family=TEST_COLUMMN_FAMILY_NAME,
+ qualifier=TEST_COLUMN_NAME,
+ new_value=cell_value,
+ timestamp_micros=-1,
+ )
+ ],
+ )
+ )
+ data_client_table.bulk_mutate_rows(row_mutation_entries)
+
+
+def create_backup(
+ instance_admin_client, table_admin_client, instance, table, backups_to_delete
+) -> admin_v2.Backup:
+ """Creates a backup of the given table under the given instance.
+
+ This will be restored to a different instance later on, to test
+ optimize_restored_table."""
+ list_clusters_response = instance_admin_client.list_clusters(parent=instance.name)
+ cluster_name = list_clusters_response.clusters[0].name
+ backup_id = generate_unique_suffix(BACKUP_PREFIX)
+ operation = table_admin_client.create_backup(
+ admin_v2.CreateBackupRequest(
+ parent=cluster_name,
+ backup_id=backup_id,
+ backup=admin_v2.Backup(
+ name=f"{cluster_name}/backups/{backup_id}",
+ source_table=table.name,
+ expire_time=datetime.now() + timedelta(hours=7),
+ ),
+ )
+ )
+ backup = operation.result()
+ backups_to_delete.append(backup)
+ return backup
+
+
+def assert_table_cell_value_equal_to(
+ table_admin_client, data_client, instance, table, value
+):
+ """Asserts that all cells in the given table have the given value."""
+ data_client_table = data_client.get_table(
+ table_admin_client.parse_instance_path(instance.name)["instance"],
+ table_admin_client.parse_table_path(table.name)["table"],
+ )
+ query = read_rows_query.ReadRowsQuery(limit=NUM_ROWS)
+ for row in data_client_table.read_rows_stream(query):
+ latest_cell = row[TEST_COLUMMN_FAMILY_NAME, TEST_COLUMN_NAME][0]
+ assert latest_cell.value.decode("utf-8") == value
+
+
+@pytest.mark.skipif(
+ os.getenv(BIGTABLE_EMULATOR),
+ reason="Backups are not supported in the Bigtable emulator",
+)
+@pytest.mark.parametrize(
+ "second_instance_storage_type,expect_optimize_operation",
+ [(admin_v2.StorageType.HDD, False), (admin_v2.StorageType.SSD, True)],
+)
+def test_optimize_restored_table(
+ admin_overlay_project_id,
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ instances_to_delete,
+ backups_to_delete,
+ second_instance_storage_type,
+ expect_optimize_operation,
+):
+ (instance_with_backup, table_to_backup) = create_instance(
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ admin_overlay_project_id,
+ instances_to_delete,
+ admin_v2.StorageType.HDD,
+ )
+ (instance_to_restore, _) = create_instance(
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ admin_overlay_project_id,
+ instances_to_delete,
+ second_instance_storage_type,
+ )
+ backup = create_backup(
+ instance_admin_client,
+ table_admin_client,
+ instance_with_backup,
+ table_to_backup,
+ backups_to_delete,
+ )
+ restore_operation = table_admin_client.restore_table(
+ admin_v2.RestoreTableRequest(
+ parent=instance_to_restore.name,
+ table_id=TEST_BACKUP_TABLE_NAME,
+ backup=backup.name,
+ )
+ )
+ assert isinstance(restore_operation, admin_v2.RestoreTableOperation)
+ restored_table = restore_operation.result()
+ optimize_operation = restore_operation.optimize_restored_table_operation()
+ if expect_optimize_operation:
+ assert isinstance(optimize_operation, api_core_operation.Operation)
+ optimize_operation.result()
+ else:
+ assert optimize_operation is None
+ assert (
+ restored_table.name
+ == f"{instance_to_restore.name}/tables/{TEST_BACKUP_TABLE_NAME}"
+ )
+ assert_table_cell_value_equal_to(
+ table_admin_client,
+ data_client,
+ instance_to_restore,
+ restored_table,
+ INITIAL_CELL_VALUE,
+ )
+
+
+def test_wait_for_consistency(
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ instances_to_delete,
+ admin_overlay_project_id,
+):
+ (instance, table) = create_instance(
+ instance_admin_client,
+ table_admin_client,
+ data_client,
+ admin_overlay_project_id,
+ instances_to_delete,
+ cluster_locations=REPLICATION_CLUSTER_LOCATIONS,
+ )
+ populate_table(table_admin_client, data_client, instance, table, NEW_CELL_VALUE)
+ wait_for_consistency_request = admin_v2.WaitForConsistencyRequest(
+ name=table.name, standard_read_remote_writes=admin_v2.StandardReadRemoteWrites()
+ )
+ table_admin_client.wait_for_consistency(wait_for_consistency_request)
+ assert_table_cell_value_equal_to(
+ table_admin_client, data_client, instance, table, NEW_CELL_VALUE
+ )
diff --git a/tests/system/conftest.py b/tests/system/conftest.py
index b8862ea4b..8c0eb30b1 100644
--- a/tests/system/conftest.py
+++ b/tests/system/conftest.py
@@ -17,9 +17,20 @@
import sys
import os
+import pytest
+import asyncio
+
script_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(script_path)
pytest_plugins = [
"data.setup_fixtures",
]
+
+
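+# Session-scoped event loop shared by the async system tests (overrides pytest-asyncio's default).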
+@pytest.fixture(scope="session")
+def event_loop():
+ loop = asyncio.new_event_loop()
+ yield loop
+ loop.stop()
+ loop.close()
diff --git a/tests/system/data/__init__.py b/tests/system/data/__init__.py
index f2952b2cd..2b35cea8f 100644
--- a/tests/system/data/__init__.py
+++ b/tests/system/data/__init__.py
@@ -16,3 +16,4 @@
TEST_FAMILY = "test-family"
TEST_FAMILY_2 = "test-family-2"
+TEST_AGGREGATE_FAMILY = "test-aggregate-family"
diff --git a/tests/system/data/setup_fixtures.py b/tests/system/data/setup_fixtures.py
index a77ffc008..169e2396b 100644
--- a/tests/system/data/setup_fixtures.py
+++ b/tests/system/data/setup_fixtures.py
@@ -20,7 +20,7 @@
import os
import uuid
-from . import TEST_FAMILY, TEST_FAMILY_2
+from . import TEST_FAMILY, TEST_FAMILY_2, TEST_AGGREGATE_FAMILY
# authorized view subset to allow all qualifiers
ALLOW_ALL = ""
@@ -183,6 +183,7 @@ def authorized_view_id(
"family_subsets": {
TEST_FAMILY: ALL_QUALIFIERS,
TEST_FAMILY_2: ALL_QUALIFIERS,
+ TEST_AGGREGATE_FAMILY: ALL_QUALIFIERS,
},
},
},
diff --git a/tests/system/data/test_system_async.py b/tests/system/data/test_system_async.py
index b59131414..ac8a358a3 100644
--- a/tests/system/data/test_system_async.py
+++ b/tests/system/data/test_system_async.py
@@ -13,7 +13,6 @@
# limitations under the License.
import pytest
-import asyncio
import datetime
import uuid
import os
@@ -27,8 +26,16 @@
from google.cloud.bigtable.data._cross_sync import CrossSync
-from . import TEST_FAMILY, TEST_FAMILY_2
+from . import TEST_FAMILY, TEST_FAMILY_2, TEST_AGGREGATE_FAMILY
+if CrossSync.is_async:
+ from google.cloud.bigtable_v2.services.bigtable.transports.grpc_asyncio import (
+ _LoggingClientAIOInterceptor as GapicInterceptor,
+ )
+else:
+ from google.cloud.bigtable_v2.services.bigtable.transports.grpc import (
+ _LoggingClientInterceptor as GapicInterceptor,
+ )
__CROSS_SYNC_OUTPUT__ = "tests.system.data.test_system_autogen"
@@ -76,6 +83,27 @@ async def add_row(
await self.target.client._gapic_client.mutate_row(request)
self.rows.append(row_key)
+ @CrossSync.convert
+ async def add_aggregate_row(
+ self, row_key, *, family=TEST_AGGREGATE_FAMILY, qualifier=b"q", input=0
+ ):
+ request = {
+ "table_name": self.target.table_name,
+ "row_key": row_key,
+ "mutations": [
+ {
+ "add_to_cell": {
+ "family_name": family,
+ "column_qualifier": {"raw_value": qualifier},
+ "timestamp": {"raw_timestamp_micros": 0},
+ "input": {"int_value": input},
+ }
+ }
+ ],
+ }
+ await self.target.client._gapic_client.mutate_row(request)
+ self.rows.append(row_key)
+
@CrossSync.convert
async def delete_rows(self):
if self.rows:
@@ -91,11 +119,14 @@ async def delete_rows(self):
@CrossSync.convert_class(sync_name="TestSystem")
class TestSystemAsync:
+ def _make_client(self):
+ project = os.getenv("GOOGLE_CLOUD_PROJECT") or None
+ return CrossSync.DataClient(project=project)
+
@CrossSync.convert
@CrossSync.pytest_fixture(scope="session")
async def client(self):
- project = os.getenv("GOOGLE_CLOUD_PROJECT") or None
- async with CrossSync.DataClient(project=project) as client:
+ async with self._make_client() as client:
yield client
@CrossSync.convert
@@ -117,14 +148,6 @@ async def target(self, client, table_id, authorized_view_id, instance_id, reques
else:
raise ValueError(f"unknown target type: {request.param}")
- @CrossSync.drop
- @pytest.fixture(scope="session")
- def event_loop(self):
- loop = asyncio.get_event_loop()
- yield loop
- loop.stop()
- loop.close()
-
@pytest.fixture(scope="session")
def column_family_config(self):
"""
@@ -132,7 +155,17 @@ def column_family_config(self):
"""
from google.cloud.bigtable_admin_v2 import types
- return {TEST_FAMILY: types.ColumnFamily(), TEST_FAMILY_2: types.ColumnFamily()}
+ int_aggregate_type = types.Type.Aggregate(
+ input_type=types.Type(int64_type={"encoding": {"big_endian_bytes": {}}}),
+ sum={},
+ )
+ return {
+ TEST_FAMILY: types.ColumnFamily(),
+ TEST_FAMILY_2: types.ColumnFamily(),
+ TEST_AGGREGATE_FAMILY: types.ColumnFamily(
+ value_type=types.Type(aggregate_type=int_aggregate_type)
+ ),
+ }
@pytest.fixture(scope="session")
def init_table_id(self):
@@ -233,34 +266,49 @@ async def test_ping_and_warm(self, client, target):
@CrossSync.pytest
async def test_channel_refresh(self, table_id, instance_id, temp_rows):
"""
- change grpc channel to refresh after 1 second. Schedule a read_rows call after refresh,
- to ensure new channel works
+ perform requests while swapping out the grpc channel. Requests should continue without error
"""
- await temp_rows.add_row(b"row_key_1")
- await temp_rows.add_row(b"row_key_2")
- project = os.getenv("GOOGLE_CLOUD_PROJECT") or None
- client = CrossSync.DataClient(project=project)
- # start custom refresh task
- try:
+ import time
+
+ await temp_rows.add_row(b"test_row")
+ async with self._make_client() as client:
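+        # cancel the client's default channel refresh task so we can install a faster one below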
+ client._channel_refresh_task.cancel()
+ channel_wrapper = client.transport.grpc_channel
+ first_channel = channel_wrapper._channel
+ # swap channels frequently, with large grace windows
client._channel_refresh_task = CrossSync.create_task(
client._manage_channel,
- refresh_interval_min=1,
- refresh_interval_max=1,
+ refresh_interval_min=0.1,
+ refresh_interval_max=0.1,
+ grace_period=1,
sync_executor=client._executor,
)
- # let task run
- await CrossSync.yield_to_event_loop()
+
+ # hit channels with frequent requests
+ end_time = time.monotonic() + 3
async with client.get_table(instance_id, table_id) as table:
- rows = await table.read_rows({})
- first_channel = client.transport.grpc_channel
- assert len(rows) == 2
- await CrossSync.sleep(2)
- rows_after_refresh = await table.read_rows({})
- assert len(rows_after_refresh) == 2
- assert client.transport.grpc_channel is not first_channel
- print(table)
- finally:
- await client.close()
+ while time.monotonic() < end_time:
+ # we expect a CancelledError if a channel is closed before completion
+ rows = await table.read_rows({})
+ assert len(rows) == 1
+ await CrossSync.yield_to_event_loop()
+ # ensure channel was updated
+ updated_channel = channel_wrapper._channel
+ assert updated_channel is not first_channel
+            # ensure interceptors are kept (gapic's logging interceptor and the metrics interceptor)
+ if CrossSync.is_async:
+ unary_interceptors = updated_channel._unary_unary_interceptors
+ assert len(unary_interceptors) == 2
+ assert GapicInterceptor in [type(i) for i in unary_interceptors]
+ assert client._metrics_interceptor in unary_interceptors
+ stream_interceptors = updated_channel._unary_stream_interceptors
+ assert len(stream_interceptors) == 1
+ assert client._metrics_interceptor in stream_interceptors
+ else:
+ assert isinstance(
+ client.transport._logged_channel._interceptor, GapicInterceptor
+ )
+ assert updated_channel._interceptor == client._metrics_interceptor
@CrossSync.pytest
@pytest.mark.usefixtures("target")
@@ -281,6 +329,37 @@ async def test_mutation_set_cell(self, target, temp_rows):
# ensure cell is updated
assert (await self._retrieve_cell_value(target, row_key)) == new_value
+ @CrossSync.pytest
+ @pytest.mark.usefixtures("target")
+ @CrossSync.Retry(
+ predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+ )
+ async def test_mutation_add_to_cell(self, target, temp_rows):
+ """
+ Test add to cell mutation
+ """
+ from google.cloud.bigtable.data.mutations import AddToCell
+
+ row_key = b"add_to_cell"
+ family = TEST_AGGREGATE_FAMILY
+ qualifier = b"test-qualifier"
+ # add row to temp_rows, for future deletion
+ await temp_rows.add_aggregate_row(row_key, family=family, qualifier=qualifier)
+ # set and check cell value
+ await target.mutate_row(
+ row_key, AddToCell(family, qualifier, 1, timestamp_micros=0)
+ )
+ encoded_result = await self._retrieve_cell_value(target, row_key)
+ int_result = int.from_bytes(encoded_result, byteorder="big")
+ assert int_result == 1
+ # update again
+ await target.mutate_row(
+ row_key, AddToCell(family, qualifier, 9, timestamp_micros=0)
+ )
+ encoded_result = await self._retrieve_cell_value(target, row_key)
+ int_result = int.from_bytes(encoded_result, byteorder="big")
+ assert int_result == 10
+
@pytest.mark.skipif(
bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits"
)
@@ -1123,7 +1202,7 @@ async def test_execute_query_simple(self, client, table_id, instance_id):
predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
)
async def test_execute_against_target(
- self, client, instance_id, table_id, temp_rows
+ self, client, instance_id, table_id, temp_rows, column_family_config
):
await temp_rows.add_row(b"row_key_1")
result = await client.execute_query(
@@ -1138,7 +1217,9 @@ async def test_execute_against_target(
assert family_map[b"q"] == b"test-value"
assert len(rows[0][TEST_FAMILY_2]) == 0
md = result.metadata
- assert len(md) == 3
+ # we expect it to fetch each column family, plus _key
+ # add additional families here if column_family_config changes
+ assert len(md) == len(column_family_config) + 1
assert md["_key"].column_type == SqlType.Bytes()
assert md[TEST_FAMILY].column_type == SqlType.Map(
SqlType.Bytes(), SqlType.Bytes()
@@ -1146,6 +1227,9 @@ async def test_execute_against_target(
assert md[TEST_FAMILY_2].column_type == SqlType.Map(
SqlType.Bytes(), SqlType.Bytes()
)
+ assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map(
+ SqlType.Bytes(), SqlType.Int64()
+ )
@pytest.mark.skipif(
bool(os.environ.get(BIGTABLE_EMULATOR)),
@@ -1248,7 +1332,7 @@ async def test_execute_query_params(self, client, table_id, instance_id):
predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
)
async def test_execute_metadata_on_empty_response(
- self, client, instance_id, table_id, temp_rows
+ self, client, instance_id, table_id, temp_rows, column_family_config
):
await temp_rows.add_row(b"row_key_1")
result = await client.execute_query(
@@ -1258,7 +1342,9 @@ async def test_execute_metadata_on_empty_response(
assert len(rows) == 0
md = result.metadata
- assert len(md) == 3
+ # we expect it to fetch each column family, plus _key
+        # add additional families here if column_family_config changes
+ assert len(md) == len(column_family_config) + 1
assert md["_key"].column_type == SqlType.Bytes()
assert md[TEST_FAMILY].column_type == SqlType.Map(
SqlType.Bytes(), SqlType.Bytes()
@@ -1266,3 +1352,6 @@ async def test_execute_metadata_on_empty_response(
assert md[TEST_FAMILY_2].column_type == SqlType.Map(
SqlType.Bytes(), SqlType.Bytes()
)
+ assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map(
+ SqlType.Bytes(), SqlType.Int64()
+ )
diff --git a/tests/system/data/test_system_autogen.py b/tests/system/data/test_system_autogen.py
index 6b2006d7b..463235087 100644
--- a/tests/system/data/test_system_autogen.py
+++ b/tests/system/data/test_system_autogen.py
@@ -26,7 +26,10 @@
from google.cloud.environment_vars import BIGTABLE_EMULATOR
from google.type import date_pb2
from google.cloud.bigtable.data._cross_sync import CrossSync
-from . import TEST_FAMILY, TEST_FAMILY_2
+from . import TEST_FAMILY, TEST_FAMILY_2, TEST_AGGREGATE_FAMILY
+from google.cloud.bigtable_v2.services.bigtable.transports.grpc import (
+ _LoggingClientInterceptor as GapicInterceptor,
+)
TARGETS = ["table"]
if not os.environ.get(BIGTABLE_EMULATOR):
@@ -66,6 +69,26 @@ def add_row(
self.target.client._gapic_client.mutate_row(request)
self.rows.append(row_key)
+ def add_aggregate_row(
+ self, row_key, *, family=TEST_AGGREGATE_FAMILY, qualifier=b"q", input=0
+ ):
+ request = {
+ "table_name": self.target.table_name,
+ "row_key": row_key,
+ "mutations": [
+ {
+ "add_to_cell": {
+ "family_name": family,
+ "column_qualifier": {"raw_value": qualifier},
+ "timestamp": {"raw_timestamp_micros": 0},
+ "input": {"int_value": input},
+ }
+ }
+ ],
+ }
+ self.target.client._gapic_client.mutate_row(request)
+ self.rows.append(row_key)
+
def delete_rows(self):
if self.rows:
request = {
@@ -79,10 +102,13 @@ def delete_rows(self):
class TestSystem:
+ def _make_client(self):
+ project = os.getenv("GOOGLE_CLOUD_PROJECT") or None
+ return CrossSync._Sync_Impl.DataClient(project=project)
+
@pytest.fixture(scope="session")
def client(self):
- project = os.getenv("GOOGLE_CLOUD_PROJECT") or None
- with CrossSync._Sync_Impl.DataClient(project=project) as client:
+ with self._make_client() as client:
yield client
@pytest.fixture(scope="session", params=TARGETS)
@@ -106,7 +132,17 @@ def column_family_config(self):
"""specify column families to create when creating a new test table"""
from google.cloud.bigtable_admin_v2 import types
- return {TEST_FAMILY: types.ColumnFamily(), TEST_FAMILY_2: types.ColumnFamily()}
+ int_aggregate_type = types.Type.Aggregate(
+ input_type=types.Type(int64_type={"encoding": {"big_endian_bytes": {}}}),
+ sum={},
+ )
+ return {
+ TEST_FAMILY: types.ColumnFamily(),
+ TEST_FAMILY_2: types.ColumnFamily(),
+ TEST_AGGREGATE_FAMILY: types.ColumnFamily(
+ value_type=types.Type(aggregate_type=int_aggregate_type)
+ ),
+ }
@pytest.fixture(scope="session")
def init_table_id(self):
@@ -185,31 +221,33 @@ def test_ping_and_warm(self, client, target):
reason="emulator mode doesn't refresh channel",
)
def test_channel_refresh(self, table_id, instance_id, temp_rows):
- """change grpc channel to refresh after 1 second. Schedule a read_rows call after refresh,
- to ensure new channel works"""
- temp_rows.add_row(b"row_key_1")
- temp_rows.add_row(b"row_key_2")
- project = os.getenv("GOOGLE_CLOUD_PROJECT") or None
- client = CrossSync._Sync_Impl.DataClient(project=project)
- try:
+ """perform requests while swapping out the grpc channel. Requests should continue without error"""
+ import time
+
+ temp_rows.add_row(b"test_row")
+ with self._make_client() as client:
+ client._channel_refresh_task.cancel()
+ channel_wrapper = client.transport.grpc_channel
+ first_channel = channel_wrapper._channel
client._channel_refresh_task = CrossSync._Sync_Impl.create_task(
client._manage_channel,
- refresh_interval_min=1,
- refresh_interval_max=1,
+ refresh_interval_min=0.1,
+ refresh_interval_max=0.1,
+ grace_period=1,
sync_executor=client._executor,
)
- CrossSync._Sync_Impl.yield_to_event_loop()
+ end_time = time.monotonic() + 3
with client.get_table(instance_id, table_id) as table:
- rows = table.read_rows({})
- first_channel = client.transport.grpc_channel
- assert len(rows) == 2
- CrossSync._Sync_Impl.sleep(2)
- rows_after_refresh = table.read_rows({})
- assert len(rows_after_refresh) == 2
- assert client.transport.grpc_channel is not first_channel
- print(table)
- finally:
- client.close()
+ while time.monotonic() < end_time:
+ rows = table.read_rows({})
+ assert len(rows) == 1
+ CrossSync._Sync_Impl.yield_to_event_loop()
+ updated_channel = channel_wrapper._channel
+ assert updated_channel is not first_channel
+ assert isinstance(
+ client.transport._logged_channel._interceptor, GapicInterceptor
+ )
+ assert updated_channel._interceptor == client._metrics_interceptor
@pytest.mark.usefixtures("target")
@CrossSync._Sync_Impl.Retry(
@@ -225,6 +263,27 @@ def test_mutation_set_cell(self, target, temp_rows):
target.mutate_row(row_key, mutation)
assert self._retrieve_cell_value(target, row_key) == new_value
+ @pytest.mark.usefixtures("target")
+ @CrossSync._Sync_Impl.Retry(
+ predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+ )
+ def test_mutation_add_to_cell(self, target, temp_rows):
+ """Test add to cell mutation"""
+ from google.cloud.bigtable.data.mutations import AddToCell
+
+ row_key = b"add_to_cell"
+ family = TEST_AGGREGATE_FAMILY
+ qualifier = b"test-qualifier"
+ temp_rows.add_aggregate_row(row_key, family=family, qualifier=qualifier)
+ target.mutate_row(row_key, AddToCell(family, qualifier, 1, timestamp_micros=0))
+ encoded_result = self._retrieve_cell_value(target, row_key)
+ int_result = int.from_bytes(encoded_result, byteorder="big")
+ assert int_result == 1
+ target.mutate_row(row_key, AddToCell(family, qualifier, 9, timestamp_micros=0))
+ encoded_result = self._retrieve_cell_value(target, row_key)
+ int_result = int.from_bytes(encoded_result, byteorder="big")
+ assert int_result == 10
+
@pytest.mark.skipif(
bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits"
)
@@ -915,7 +974,9 @@ def test_execute_query_simple(self, client, table_id, instance_id):
@CrossSync._Sync_Impl.Retry(
predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
)
- def test_execute_against_target(self, client, instance_id, table_id, temp_rows):
+ def test_execute_against_target(
+ self, client, instance_id, table_id, temp_rows, column_family_config
+ ):
temp_rows.add_row(b"row_key_1")
result = client.execute_query("SELECT * FROM `" + table_id + "`", instance_id)
rows = [r for r in result]
@@ -926,7 +987,7 @@ def test_execute_against_target(self, client, instance_id, table_id, temp_rows):
assert family_map[b"q"] == b"test-value"
assert len(rows[0][TEST_FAMILY_2]) == 0
md = result.metadata
- assert len(md) == 3
+ assert len(md) == len(column_family_config) + 1
assert md["_key"].column_type == SqlType.Bytes()
assert md[TEST_FAMILY].column_type == SqlType.Map(
SqlType.Bytes(), SqlType.Bytes()
@@ -934,6 +995,9 @@ def test_execute_against_target(self, client, instance_id, table_id, temp_rows):
assert md[TEST_FAMILY_2].column_type == SqlType.Map(
SqlType.Bytes(), SqlType.Bytes()
)
+ assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map(
+ SqlType.Bytes(), SqlType.Int64()
+ )
@pytest.mark.skipif(
bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL"
@@ -1023,7 +1087,7 @@ def test_execute_query_params(self, client, table_id, instance_id):
predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
)
def test_execute_metadata_on_empty_response(
- self, client, instance_id, table_id, temp_rows
+ self, client, instance_id, table_id, temp_rows, column_family_config
):
temp_rows.add_row(b"row_key_1")
result = client.execute_query(
@@ -1032,7 +1096,7 @@ def test_execute_metadata_on_empty_response(
rows = [r for r in result]
assert len(rows) == 0
md = result.metadata
- assert len(md) == 3
+ assert len(md) == len(column_family_config) + 1
assert md["_key"].column_type == SqlType.Bytes()
assert md[TEST_FAMILY].column_type == SqlType.Map(
SqlType.Bytes(), SqlType.Bytes()
@@ -1040,3 +1104,6 @@ def test_execute_metadata_on_empty_response(
assert md[TEST_FAMILY_2].column_type == SqlType.Map(
SqlType.Bytes(), SqlType.Bytes()
)
+ assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map(
+ SqlType.Bytes(), SqlType.Int64()
+ )
diff --git a/tests/system/v2_client/_helpers.py b/tests/system/v2_client/_helpers.py
index 95261879e..e792def15 100644
--- a/tests/system/v2_client/_helpers.py
+++ b/tests/system/v2_client/_helpers.py
@@ -12,12 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import datetime
+from datetime import datetime, timezone
import grpc
from google.api_core import exceptions
from google.cloud import exceptions as core_exceptions
-from google.cloud._helpers import UTC
from test_utils import retry
@@ -41,7 +40,5 @@ def _retry_on_unavailable(exc):
def label_stamp():
return (
- datetime.datetime.utcnow()
- .replace(microsecond=0, tzinfo=UTC)
- .strftime("%Y-%m-%dt%H-%M-%S")
+ datetime.now(timezone.utc).replace(microsecond=0).strftime("%Y-%m-%dt%H-%M-%S")
)
diff --git a/tests/system/v2_client/test_data_api.py b/tests/system/v2_client/test_data_api.py
index 579837e34..c012eb32a 100644
--- a/tests/system/v2_client/test_data_api.py
+++ b/tests/system/v2_client/test_data_api.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import datetime
+from datetime import datetime, timedelta, timezone
import operator
import pytest
@@ -62,8 +62,8 @@ def rows_to_delete():
def test_table_read_rows_filter_millis(data_table):
from google.cloud.bigtable import row_filters
- end = datetime.datetime.now()
- start = end - datetime.timedelta(minutes=60)
+ end = datetime.now()
+ start = end - timedelta(minutes=60)
timestamp_range = row_filters.TimestampRange(start=start, end=end)
timefilter = row_filters.TimestampRangeFilter(timestamp_range)
row_data = data_table.read_rows(filter_=timefilter)
@@ -233,20 +233,19 @@ def test_table_read_row_large_cell(data_table, rows_to_delete, skip_on_emulator)
def _write_to_row(row1, row2, row3, row4):
from google.cloud._helpers import _datetime_from_microseconds
from google.cloud._helpers import _microseconds_from_datetime
- from google.cloud._helpers import UTC
from google.cloud.bigtable.row_data import Cell
- timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC)
+ timestamp1 = datetime.now(timezone.utc)
timestamp1_micros = _microseconds_from_datetime(timestamp1)
# Truncate to millisecond granularity.
timestamp1_micros -= timestamp1_micros % 1000
timestamp1 = _datetime_from_microseconds(timestamp1_micros)
# 1000 microseconds is a millisecond
- timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000)
+ timestamp2 = timestamp1 + timedelta(microseconds=1000)
timestamp2_micros = _microseconds_from_datetime(timestamp2)
- timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000)
+ timestamp3 = timestamp1 + timedelta(microseconds=2000)
timestamp3_micros = _microseconds_from_datetime(timestamp3)
- timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000)
+ timestamp4 = timestamp1 + timedelta(microseconds=3000)
timestamp4_micros = _microseconds_from_datetime(timestamp4)
if row1 is not None:
diff --git a/tests/unit/admin_overlay/my_oneof_message.py b/tests/unit/admin_overlay/my_oneof_message.py
new file mode 100644
index 000000000..25667cfca
--- /dev/null
+++ b/tests/unit/admin_overlay/my_oneof_message.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto
+
+from google.cloud.bigtable_admin_v2.utils import oneof_message
+
+__protobuf__ = proto.module(
+ package="test.oneof.v1",
+ manifest={
+ "MyOneofMessage",
+ },
+)
+
+
+# Foo and Bar belong to oneof foobar, and baz is independent.
+class MyOneofMessage(oneof_message.OneofMessage):
+ foo: int = proto.Field(
+ proto.INT32,
+ number=1,
+ oneof="foobar",
+ )
+
+ bar: int = proto.Field(
+ proto.INT32,
+ number=2,
+ oneof="foobar",
+ )
+
+ baz: int = proto.Field(
+ proto.INT32,
+ number=3,
+ )
diff --git a/tests/unit/admin_overlay/test_admin_packaging.py b/tests/unit/admin_overlay/test_admin_packaging.py
new file mode 100644
index 000000000..729a92b5c
--- /dev/null
+++ b/tests/unit/admin_overlay/test_admin_packaging.py
@@ -0,0 +1,41 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+
+import pytest
+
+
+@pytest.mark.parametrize(
+ "module", ["google.cloud.bigtable_admin", "google.cloud.bigtable_admin_v2"]
+)
+def test_admin_overlay_imports(module):
+    # Import the module dynamically with importlib, simulating "from <module> import <name>"
+ mod = importlib.import_module(module)
+
+ # Check that the import aliasing works as expected for overlay/autogenerated clients/types.
+ classes_to_modules = {
+ "BigtableTableAdminClient": "google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.client",
+ "RestoreTableOperation": "google.cloud.bigtable_admin_v2.overlay.types.restore_table",
+ "BigtableInstanceAdminClient": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.client",
+ "RestoreTableRequest": "google.cloud.bigtable_admin_v2.types.bigtable_table_admin",
+ }
+
+ for cls_name, submodule_name in classes_to_modules.items():
+ cls = getattr(mod, cls_name)
+ submodule = importlib.import_module(submodule_name)
+ assert cls == getattr(submodule, cls_name)
+
+        # Check that "from <module> import *" exposes the class via __all__.
+ assert cls_name in mod.__all__
diff --git a/tests/unit/admin_overlay/test_async_client.py b/tests/unit/admin_overlay/test_async_client.py
new file mode 100644
index 000000000..0d844a9e4
--- /dev/null
+++ b/tests/unit/admin_overlay/test_async_client.py
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER # noqa: F401
+except ImportError: # pragma: NO COVER
+ import mock
+
+from google.api_core import exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth.credentials import AnonymousCredentials
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+from google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.async_client import (
+ BigtableTableAdminAsyncClient,
+ DEFAULT_CLIENT_INFO,
+)
+from google.cloud.bigtable_admin_v2.overlay.types import (
+ async_restore_table,
+ wait_for_consistency_request,
+)
+
+from google.cloud.bigtable import __version__ as bigtable_version
+
+from test_async_consistency import (
+ FALSE_CONSISTENCY_RESPONSE,
+ TRUE_CONSISTENCY_RESPONSE,
+)
+
+import pytest
+
+
+PARENT_NAME = "my_parent"
+TABLE_NAME = "my_table"
+CONSISTENCY_TOKEN = "abcdefg"
+
+
+def _make_client(**kwargs):
+ kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials())
+ return BigtableTableAdminAsyncClient(**kwargs)
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (
+ transports.BigtableTableAdminGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ ],
+)
+def test_bigtable_table_admin_async_client_client_version(
+ transport_class, transport_name
+):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ _make_client(transport=transport_name)
+
+ # call_args.kwargs is not supported in Python 3.7, so find them from the tuple
+ # instead. It's always the last item in the call_args tuple.
+ transport_init_call_kwargs = patched.call_args[-1]
+ assert transport_init_call_kwargs["client_info"] == DEFAULT_CLIENT_INFO
+
+ assert (
+ DEFAULT_CLIENT_INFO.client_library_version
+ == f"{bigtable_version}-admin-overlay-async"
+ )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "kwargs",
+ [
+ {
+ "request": bigtable_table_admin.RestoreTableRequest(
+ parent=PARENT_NAME,
+ table_id=TABLE_NAME,
+ )
+ },
+ {
+ "request": {
+ "parent": PARENT_NAME,
+ "table_id": TABLE_NAME,
+ },
+ },
+ {
+ "request": bigtable_table_admin.RestoreTableRequest(
+ parent=PARENT_NAME,
+ table_id=TABLE_NAME,
+ ),
+ "retry": mock.Mock(spec=retries.Retry),
+ "timeout": mock.Mock(spec=retries.Retry),
+ "metadata": [("foo", "bar")],
+ },
+ ],
+)
+async def test_bigtable_table_admin_async_client_restore_table(kwargs):
+ client = _make_client()
+
+ with mock.patch.object(
+ async_restore_table, "AsyncRestoreTableOperation", new_callable=mock.AsyncMock
+ ) as future_mock:
+ with mock.patch.object(
+ client._client, "_transport", new_callable=mock.AsyncMock
+ ) as transport_mock:
+ with mock.patch.object(
+ client, "_restore_table", new_callable=mock.AsyncMock
+ ) as restore_table_mock:
+ operation_mock = mock.Mock()
+ restore_table_mock.return_value = operation_mock
+ await client.restore_table(**kwargs)
+
+ restore_table_mock.assert_called_once_with(
+ request=kwargs["request"],
+ retry=kwargs.get("retry", gapic_v1.method.DEFAULT),
+ timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT),
+ metadata=kwargs.get("metadata", ()),
+ )
+ future_mock.assert_called_once_with(
+ transport_mock.operations_client, operation_mock
+ )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "kwargs,check_consistency_request_extras",
+ [
+ (
+ {
+ "request": wait_for_consistency_request.WaitForConsistencyRequest(
+ name=TABLE_NAME,
+ )
+ },
+ {},
+ ),
+ (
+ {
+ "request": wait_for_consistency_request.WaitForConsistencyRequest(
+ name=TABLE_NAME,
+ standard_read_remote_writes=bigtable_table_admin.StandardReadRemoteWrites(),
+ )
+ },
+ {
+ "standard_read_remote_writes": bigtable_table_admin.StandardReadRemoteWrites(),
+ },
+ ),
+ (
+ {
+ "request": wait_for_consistency_request.WaitForConsistencyRequest(
+ name=TABLE_NAME,
+ data_boost_read_local_writes=bigtable_table_admin.DataBoostReadLocalWrites(),
+ )
+ },
+ {
+ "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(),
+ },
+ ),
+ (
+ {
+ "request": {
+ "name": TABLE_NAME,
+ "data_boost_read_local_writes": {},
+ }
+ },
+ {
+ "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(),
+ },
+ ),
+ (
+ {
+ "name": TABLE_NAME,
+ },
+ {},
+ ),
+ (
+ {
+ "request": wait_for_consistency_request.WaitForConsistencyRequest(
+ name=TABLE_NAME,
+ ),
+ "retry": mock.Mock(spec=retries.Retry),
+ "timeout": mock.Mock(spec=retries.Retry),
+ "metadata": [("foo", "bar")],
+ },
+ {},
+ ),
+ ],
+)
+async def test_bigtable_table_admin_async_client_wait_for_consistency(
+ kwargs, check_consistency_request_extras
+):
+ client = _make_client()
+ poll_count = 3
+ check_mock_side_effect = [FALSE_CONSISTENCY_RESPONSE] * (poll_count - 1)
+ check_mock_side_effect.append(TRUE_CONSISTENCY_RESPONSE)
+
+ with mock.patch.object(
+ client, "generate_consistency_token", new_callable=mock.AsyncMock
+ ) as generate_mock:
+ with mock.patch.object(
+ client, "check_consistency", new_callable=mock.AsyncMock
+ ) as check_mock:
+ generate_mock.return_value = (
+ bigtable_table_admin.GenerateConsistencyTokenResponse(
+ consistency_token=CONSISTENCY_TOKEN,
+ )
+ )
+
+ check_mock.side_effect = check_mock_side_effect
+ result = await client.wait_for_consistency(**kwargs)
+
+ assert result is True
+
+ generate_mock.assert_awaited_once_with(
+ bigtable_table_admin.GenerateConsistencyTokenRequest(
+ name=TABLE_NAME,
+ ),
+ retry=kwargs.get("retry", gapic_v1.method.DEFAULT),
+ timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT),
+ metadata=kwargs.get("metadata", ()),
+ )
+
+ expected_check_consistency_request = (
+ bigtable_table_admin.CheckConsistencyRequest(
+ name=TABLE_NAME,
+ consistency_token=CONSISTENCY_TOKEN,
+ **check_consistency_request_extras,
+ )
+ )
+
+ check_mock.assert_awaited_with(
+ expected_check_consistency_request,
+ retry=kwargs.get("retry", gapic_v1.method.DEFAULT),
+ timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT),
+ metadata=kwargs.get("metadata", ()),
+ )
+
+
+@pytest.mark.asyncio
+async def test_bigtable_table_admin_async_client_wait_for_consistency_error_in_call():
+ client = _make_client()
+ request = wait_for_consistency_request.WaitForConsistencyRequest(
+ name=TABLE_NAME,
+ )
+
+ with pytest.raises(exceptions.GoogleAPICallError):
+ with mock.patch.object(
+ client, "generate_consistency_token", new_callable=mock.AsyncMock
+ ) as generate_mock:
+ generate_mock.side_effect = exceptions.DeadlineExceeded(
+ "Deadline Exceeded."
+ )
+ await client.wait_for_consistency(request)
+
+ with pytest.raises(exceptions.GoogleAPICallError):
+ with mock.patch.object(
+ client, "generate_consistency_token", new_callable=mock.AsyncMock
+ ) as generate_mock:
+ with mock.patch.object(
+ client, "check_consistency", new_callable=mock.AsyncMock
+ ) as check_mock:
+ generate_mock.return_value = (
+ bigtable_table_admin.GenerateConsistencyTokenResponse(
+ consistency_token=CONSISTENCY_TOKEN,
+ )
+ )
+
+ check_mock.side_effect = exceptions.DeadlineExceeded(
+ "Deadline Exceeded."
+ )
+ await client.wait_for_consistency(request)
+
+
+@pytest.mark.asyncio
+async def test_bigtable_table_admin_async_client_wait_for_consistency_user_error():
+ client = _make_client()
+ with pytest.raises(ValueError):
+ await client.wait_for_consistency(
+ {
+ "name": TABLE_NAME,
+ },
+ name=TABLE_NAME,
+ )
diff --git a/tests/unit/admin_overlay/test_async_consistency.py b/tests/unit/admin_overlay/test_async_consistency.py
new file mode 100644
index 000000000..b64ae1a11
--- /dev/null
+++ b/tests/unit/admin_overlay/test_async_consistency.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER # noqa: F401
+except ImportError: # pragma: NO COVER
+ import mock
+
+from google.cloud.bigtable_admin_v2.overlay.types import async_consistency
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+import pytest
+
+
+TRUE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse(
+ consistent=True
+)
+
+FALSE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse(
+ consistent=False
+)
+
+
+def async_mock_check_consistency_callable(max_poll_count=1):
+ # Return False max_poll_count - 1 times, then True, for a total of
+ # max_poll_count calls.
+ side_effect = [FALSE_CONSISTENCY_RESPONSE] * (max_poll_count - 1)
+ side_effect.append(TRUE_CONSISTENCY_RESPONSE)
+ return mock.AsyncMock(spec=["__call__"], side_effect=side_effect)
+
+
+@pytest.mark.asyncio
+async def test_check_consistency_future_cancel():
+ check_consistency_call = async_mock_check_consistency_callable()
+ future = async_consistency._AsyncCheckConsistencyPollingFuture(
+ check_consistency_call
+ )
+ with pytest.raises(NotImplementedError):
+ future.cancel()
+
+ with pytest.raises(NotImplementedError):
+ future.cancelled()
+
+
+@pytest.mark.asyncio
+async def test_check_consistency_future_result():
+ times = 5
+ check_consistency_call = async_mock_check_consistency_callable(times)
+ future = async_consistency._AsyncCheckConsistencyPollingFuture(
+ check_consistency_call
+ )
+ is_consistent = await future.result()
+
+ assert is_consistent
+ check_consistency_call.assert_has_calls([mock.call()] * times)
+
+ # Check that calling result again doesn't produce more calls.
+    is_consistent = await future.result()
+
+ assert is_consistent
+ check_consistency_call.assert_has_calls([mock.call()] * times)
diff --git a/tests/unit/admin_overlay/test_async_restore_table.py b/tests/unit/admin_overlay/test_async_restore_table.py
new file mode 100644
index 000000000..95799fc14
--- /dev/null
+++ b/tests/unit/admin_overlay/test_async_restore_table.py
@@ -0,0 +1,248 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+ from unittest.mock import AsyncMock # pragma: NO COVER # noqa: F401
+except ImportError: # pragma: NO COVER
+ import mock
+
+from google.longrunning import operations_pb2
+from google.rpc import status_pb2, code_pb2
+
+from google.api_core import operation_async, exceptions
+from google.api_core.future import async_future
+from google.api_core.operations_v1 import operations_async_client
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin, table
+from google.cloud.bigtable_admin_v2.overlay.types import async_restore_table
+
+import pytest
+
+
+# Set up the mock operations
+DEFAULT_MAX_POLL = 3
+RESTORE_TABLE_OPERATION_TABLE_NAME = "Test Table"
+RESTORE_TABLE_OPERATION_NAME = "test/restore_table"
+RESTORE_TABLE_OPERATION_METADATA = bigtable_table_admin.RestoreTableMetadata(
+ name=RESTORE_TABLE_OPERATION_TABLE_NAME,
+)
+OPTIMIZE_RESTORED_TABLE_OPERATION_NAME = "test/optimize_restore_table"
+OPTIMIZE_RESTORED_TABLE_METADATA = bigtable_table_admin.OptimizeRestoredTableMetadata(
+ name=RESTORE_TABLE_OPERATION_TABLE_NAME,
+)
+
+OPTIMIZE_RESTORED_TABLE_OPERATION_ID = "abcdefg"
+RESTORE_TABLE_OPERATION_FINISHED_RESPONSE = table.Table(
+ name=RESTORE_TABLE_OPERATION_TABLE_NAME,
+)
+RESTORE_TABLE_OPERATION_FINISHED_ERROR = status_pb2.Status(
+ code=code_pb2.DEADLINE_EXCEEDED, message="Deadline Exceeded"
+)
+
+
+def make_operation_proto(
+ name, done=False, metadata=None, response=None, error=None, **kwargs
+):
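+    # metadata and response are proto-plus messages; their raw protobuf (._pb) is
+    # packed into the Operation's Any fields, as the real operations API would return.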
+ operation_proto = operations_pb2.Operation(name=name, done=done, **kwargs)
+
+ if metadata is not None:
+ operation_proto.metadata.Pack(metadata._pb)
+
+ if response is not None:
+ operation_proto.response.Pack(response._pb)
+
+ if error is not None:
+ operation_proto.error.CopyFrom(error)
+
+ return operation_proto
+
+
+RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO = make_operation_proto(
+ name=RESTORE_TABLE_OPERATION_NAME,
+ done=False,
+ metadata=RESTORE_TABLE_OPERATION_METADATA,
+)
+
+OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO = make_operation_proto(
+ name=OPTIMIZE_RESTORED_TABLE_OPERATION_NAME,
+ metadata=OPTIMIZE_RESTORED_TABLE_METADATA,
+)
+
+
+# Set up the mock operation client
+def mock_restore_table_operation(
+ max_poll_count=DEFAULT_MAX_POLL, fail=False, has_optimize_operation=True
+):
+ client = mock.AsyncMock(spec=operations_async_client.OperationsAsyncClient)
+
+ # Set up the polling
+ side_effect = [RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO] * (max_poll_count - 1)
+ finished_operation_metadata = bigtable_table_admin.RestoreTableMetadata()
+ bigtable_table_admin.RestoreTableMetadata.copy_from(
+ finished_operation_metadata, RESTORE_TABLE_OPERATION_METADATA
+ )
+ if has_optimize_operation:
+ finished_operation_metadata.optimize_table_operation_name = (
+ OPTIMIZE_RESTORED_TABLE_OPERATION_ID
+ )
+
+ if fail:
+ final_operation_proto = make_operation_proto(
+ name=RESTORE_TABLE_OPERATION_NAME,
+ done=True,
+ metadata=finished_operation_metadata,
+ error=RESTORE_TABLE_OPERATION_FINISHED_ERROR,
+ )
+ else:
+ final_operation_proto = make_operation_proto(
+ name=RESTORE_TABLE_OPERATION_NAME,
+ done=True,
+ metadata=finished_operation_metadata,
+ response=RESTORE_TABLE_OPERATION_FINISHED_RESPONSE,
+ )
+ side_effect.append(final_operation_proto)
+ refresh = mock.AsyncMock(spec=["__call__"], side_effect=side_effect)
+ cancel = mock.AsyncMock(spec=["__call__"])
+ future = operation_async.AsyncOperation(
+ RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO,
+ refresh,
+ cancel,
+ result_type=table.Table,
+ metadata_type=bigtable_table_admin.RestoreTableMetadata,
+ )
+
+    # Set up the optimize_restored_table_operation
+ client.get_operation.side_effect = [OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO]
+
+ return async_restore_table.AsyncRestoreTableOperation(client, future)
+
+
+@pytest.mark.asyncio
+async def test_async_restore_table_operation_client_success_has_optimize():
+ restore_table_operation = mock_restore_table_operation()
+
+ await restore_table_operation.result()
+ optimize_restored_table_operation = (
+ await restore_table_operation.optimize_restored_table_operation()
+ )
+
+ assert isinstance(optimize_restored_table_operation, operation_async.AsyncOperation)
+ assert (
+ optimize_restored_table_operation._operation
+ == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO
+ )
+ restore_table_operation._operations_client.get_operation.assert_called_with(
+ name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID
+ )
+ restore_table_operation._refresh.assert_has_calls(
+ [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL
+ )
+
+
+@pytest.mark.asyncio
+async def test_restore_table_operation_client_success_has_optimize_multiple_calls():
+ restore_table_operation = mock_restore_table_operation()
+
+ await restore_table_operation.result()
+ optimize_restored_table_operation = (
+ await restore_table_operation.optimize_restored_table_operation()
+ )
+
+ assert isinstance(optimize_restored_table_operation, operation_async.AsyncOperation)
+ assert (
+ optimize_restored_table_operation._operation
+ == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO
+ )
+ restore_table_operation._operations_client.get_operation.assert_called_with(
+ name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID
+ )
+ restore_table_operation._refresh.assert_has_calls(
+ [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL
+ )
+
+ await restore_table_operation.optimize_restored_table_operation()
+ restore_table_operation._refresh.assert_has_calls(
+ [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL
+ )
+
+
+@pytest.mark.asyncio
+async def test_restore_table_operation_success_has_optimize_call_before_done():
+ restore_table_operation = mock_restore_table_operation()
+
+ with pytest.raises(exceptions.GoogleAPIError):
+ await restore_table_operation.optimize_restored_table_operation()
+
+ restore_table_operation._operations_client.get_operation.assert_not_called()
+
+
+@pytest.mark.asyncio
+async def test_restore_table_operation_client_success_only_cache_after_finishing():
+ restore_table_operation = mock_restore_table_operation()
+
+ with pytest.raises(exceptions.GoogleAPIError):
+ await restore_table_operation.optimize_restored_table_operation()
+
+ await restore_table_operation.result()
+ optimize_restored_table_operation = (
+ await restore_table_operation.optimize_restored_table_operation()
+ )
+
+ assert isinstance(optimize_restored_table_operation, operation_async.AsyncOperation)
+ assert (
+ optimize_restored_table_operation._operation
+ == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO
+ )
+ restore_table_operation._operations_client.get_operation.assert_called_with(
+ name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID
+ )
+ restore_table_operation._refresh.assert_has_calls(
+ [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL
+ )
+
+    await restore_table_operation.optimize_restored_table_operation()
+ restore_table_operation._refresh.assert_has_calls(
+ [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL
+ )
+
+
+@pytest.mark.asyncio
+async def test_restore_table_operation_success_no_optimize():
+ restore_table_operation = mock_restore_table_operation(has_optimize_operation=False)
+
+ await restore_table_operation.result()
+ optimize_restored_table_operation = (
+ await restore_table_operation.optimize_restored_table_operation()
+ )
+
+ assert optimize_restored_table_operation is None
+ restore_table_operation._operations_client.get_operation.assert_not_called()
+
+
+@pytest.mark.asyncio
+async def test_restore_table_operation_exception():
+ restore_table_operation = mock_restore_table_operation(
+ fail=True, has_optimize_operation=False
+ )
+
+ with pytest.raises(exceptions.GoogleAPICallError):
+ await restore_table_operation.result()
+
+ optimize_restored_table_operation = (
+ await restore_table_operation.optimize_restored_table_operation()
+ )
+
+ assert optimize_restored_table_operation is None
+ restore_table_operation._operations_client.get_operation.assert_not_called()
diff --git a/tests/unit/admin_overlay/test_client.py b/tests/unit/admin_overlay/test_client.py
new file mode 100644
index 000000000..07922b349
--- /dev/null
+++ b/tests/unit/admin_overlay/test_client.py
@@ -0,0 +1,278 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+except ImportError: # pragma: NO COVER
+ import mock
+
+from google.api_core import exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth.credentials import AnonymousCredentials
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+from google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.client import (
+ BigtableTableAdminClient,
+ DEFAULT_CLIENT_INFO,
+)
+from google.cloud.bigtable_admin_v2.overlay.types import (
+ restore_table,
+ wait_for_consistency_request,
+)
+
+from google.cloud.bigtable import __version__ as bigtable_version
+
+from test_consistency import (
+ FALSE_CONSISTENCY_RESPONSE,
+ TRUE_CONSISTENCY_RESPONSE,
+)
+
+import pytest
+
+
+PARENT_NAME = "my_parent"
+TABLE_NAME = "my_table"
+CONSISTENCY_TOKEN = "abcdefg"
+
+
+def _make_client(**kwargs):
+ kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials())
+ return BigtableTableAdminClient(**kwargs)
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (
+ transports.BigtableTableAdminGrpcTransport,
+ "grpc",
+ ),
+ (
+ transports.BigtableTableAdminRestTransport,
+ "rest",
+ ),
+ ],
+)
+def test_bigtable_table_admin_client_client_version(transport_class, transport_name):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ _make_client(transport=transport_name)
+
+    # call_args.kwargs is not supported in Python 3.7, so read the kwargs from the
+    # call_args tuple instead; they are always its last item.
+ transport_init_call_kwargs = patched.call_args[-1]
+ assert transport_init_call_kwargs["client_info"] == DEFAULT_CLIENT_INFO
+
+ assert (
+ DEFAULT_CLIENT_INFO.client_library_version
+ == f"{bigtable_version}-admin-overlay"
+ )
+
+
+@pytest.mark.parametrize(
+ "kwargs",
+ [
+ {
+ "request": bigtable_table_admin.RestoreTableRequest(
+ parent=PARENT_NAME,
+ table_id=TABLE_NAME,
+ )
+ },
+ {
+ "request": {
+ "parent": PARENT_NAME,
+ "table_id": TABLE_NAME,
+ },
+ },
+ {
+ "request": bigtable_table_admin.RestoreTableRequest(
+ parent=PARENT_NAME,
+ table_id=TABLE_NAME,
+ ),
+ "retry": mock.Mock(spec=retries.Retry),
+ "timeout": mock.Mock(spec=retries.Retry),
+ "metadata": [("foo", "bar")],
+ },
+ ],
+)
+def test_bigtable_table_admin_client_restore_table(kwargs):
+ client = _make_client()
+
+ with mock.patch.object(restore_table, "RestoreTableOperation") as future_mock:
+ with mock.patch.object(client, "_transport") as transport_mock:
+ with mock.patch.object(client, "_restore_table") as restore_table_mock:
+ operation_mock = mock.Mock()
+ restore_table_mock.return_value = operation_mock
+ client.restore_table(**kwargs)
+
+ restore_table_mock.assert_called_once_with(
+ request=kwargs["request"],
+ retry=kwargs.get("retry", gapic_v1.method.DEFAULT),
+ timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT),
+ metadata=kwargs.get("metadata", ()),
+ )
+ future_mock.assert_called_once_with(
+ transport_mock.operations_client, operation_mock
+ )
+
+
+@pytest.mark.parametrize(
+ "kwargs,check_consistency_request_extras",
+ [
+ (
+ {
+ "request": wait_for_consistency_request.WaitForConsistencyRequest(
+ name=TABLE_NAME,
+ )
+ },
+ {},
+ ),
+ (
+ {
+ "request": wait_for_consistency_request.WaitForConsistencyRequest(
+ name=TABLE_NAME,
+ standard_read_remote_writes=bigtable_table_admin.StandardReadRemoteWrites(),
+ )
+ },
+ {
+ "standard_read_remote_writes": bigtable_table_admin.StandardReadRemoteWrites(),
+ },
+ ),
+ (
+ {
+ "request": wait_for_consistency_request.WaitForConsistencyRequest(
+ name=TABLE_NAME,
+ data_boost_read_local_writes=bigtable_table_admin.DataBoostReadLocalWrites(),
+ )
+ },
+ {
+ "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(),
+ },
+ ),
+ (
+ {
+ "request": {
+ "name": TABLE_NAME,
+ "data_boost_read_local_writes": {},
+ }
+ },
+ {
+ "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(),
+ },
+ ),
+ (
+ {
+ "name": TABLE_NAME,
+ },
+ {},
+ ),
+ (
+ {
+ "request": wait_for_consistency_request.WaitForConsistencyRequest(
+ name=TABLE_NAME,
+ ),
+ "retry": mock.Mock(spec=retries.Retry),
+ "timeout": mock.Mock(spec=retries.Retry),
+ "metadata": [("foo", "bar")],
+ },
+ {},
+ ),
+ ],
+)
+def test_bigtable_table_admin_client_wait_for_consistency(
+ kwargs, check_consistency_request_extras
+):
+ client = _make_client()
+ poll_count = 3
+ check_mock_side_effect = [FALSE_CONSISTENCY_RESPONSE] * (poll_count - 1)
+ check_mock_side_effect.append(TRUE_CONSISTENCY_RESPONSE)
+
+ with mock.patch.object(client, "generate_consistency_token") as generate_mock:
+ with mock.patch.object(client, "check_consistency") as check_mock:
+ generate_mock.return_value = (
+ bigtable_table_admin.GenerateConsistencyTokenResponse(
+ consistency_token=CONSISTENCY_TOKEN,
+ )
+ )
+
+ check_mock.side_effect = check_mock_side_effect
+ result = client.wait_for_consistency(**kwargs)
+
+ assert result is True
+
+ generate_mock.assert_called_once_with(
+ bigtable_table_admin.GenerateConsistencyTokenRequest(
+ name=TABLE_NAME,
+ ),
+ retry=kwargs.get("retry", gapic_v1.method.DEFAULT),
+ timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT),
+ metadata=kwargs.get("metadata", ()),
+ )
+
+ expected_check_consistency_request = (
+ bigtable_table_admin.CheckConsistencyRequest(
+ name=TABLE_NAME,
+ consistency_token=CONSISTENCY_TOKEN,
+ **check_consistency_request_extras,
+ )
+ )
+
+ check_mock.assert_called_with(
+ expected_check_consistency_request,
+ retry=kwargs.get("retry", gapic_v1.method.DEFAULT),
+ timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT),
+ metadata=kwargs.get("metadata", ()),
+ )
+
+
+def test_bigtable_table_admin_client_wait_for_consistency_error_in_call():
+ client = _make_client()
+ request = wait_for_consistency_request.WaitForConsistencyRequest(
+ name=TABLE_NAME,
+ )
+
+ with pytest.raises(exceptions.GoogleAPICallError):
+ with mock.patch.object(client, "generate_consistency_token") as generate_mock:
+ generate_mock.side_effect = exceptions.DeadlineExceeded(
+ "Deadline Exceeded."
+ )
+ client.wait_for_consistency(request)
+
+ with pytest.raises(exceptions.GoogleAPICallError):
+ with mock.patch.object(client, "generate_consistency_token") as generate_mock:
+ with mock.patch.object(client, "check_consistency") as check_mock:
+ generate_mock.return_value = (
+ bigtable_table_admin.GenerateConsistencyTokenResponse(
+ consistency_token=CONSISTENCY_TOKEN,
+ )
+ )
+
+ check_mock.side_effect = exceptions.DeadlineExceeded(
+ "Deadline Exceeded."
+ )
+ client.wait_for_consistency(request)
+
+
+def test_bigtable_table_admin_client_wait_for_consistency_user_error():
+ client = _make_client()
+ with pytest.raises(ValueError):
+ client.wait_for_consistency(
+ {
+ "name": TABLE_NAME,
+ },
+ name=TABLE_NAME,
+ )
diff --git a/tests/unit/admin_overlay/test_consistency.py b/tests/unit/admin_overlay/test_consistency.py
new file mode 100644
index 000000000..29bc0c481
--- /dev/null
+++ b/tests/unit/admin_overlay/test_consistency.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+except ImportError: # pragma: NO COVER
+ import mock
+
+from google.cloud.bigtable_admin_v2.overlay.types import consistency
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+import pytest
+
+
+TRUE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse(
+ consistent=True
+)
+
+FALSE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse(
+ consistent=False
+)
+
+
+def mock_check_consistency_callable(max_poll_count=1):
+ # Return False max_poll_count - 1 times, then True, for a total of
+ # max_poll_count calls.
+ side_effect = [FALSE_CONSISTENCY_RESPONSE] * (max_poll_count - 1)
+ side_effect.append(TRUE_CONSISTENCY_RESPONSE)
+ return mock.Mock(spec=["__call__"], side_effect=side_effect)
+
+
+def test_check_consistency_future_cancel():
+ check_consistency_call = mock_check_consistency_callable()
+ future = consistency._CheckConsistencyPollingFuture(check_consistency_call)
+ with pytest.raises(NotImplementedError):
+ future.cancel()
+
+ with pytest.raises(NotImplementedError):
+ future.cancelled()
+
+
+def test_check_consistency_future_result():
+ times = 5
+ check_consistency_call = mock_check_consistency_callable(times)
+ future = consistency._CheckConsistencyPollingFuture(check_consistency_call)
+ is_consistent = future.result()
+
+ assert is_consistent
+ check_consistency_call.assert_has_calls([mock.call()] * times)
+
+ # Check that calling result again doesn't produce more calls.
+ is_consistent = future.result()
+
+ assert is_consistent
+ check_consistency_call.assert_has_calls([mock.call()] * times)
diff --git a/tests/unit/admin_overlay/test_oneof_message.py b/tests/unit/admin_overlay/test_oneof_message.py
new file mode 100644
index 000000000..b9c521235
--- /dev/null
+++ b/tests/unit/admin_overlay/test_oneof_message.py
@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from google.cloud.bigtable_admin_v2.types import GcRule
+from google.protobuf import duration_pb2
+
+import my_oneof_message
+
+import pytest
+
+
+# The following proto bytestring was constructed by running printproto in
+# text-to-binary mode on the following textproto for GcRule:
+#
+# intersection {
+# rules {
+# max_num_versions: 1234
+# }
+# rules {
+# max_age {
+# seconds: 12345
+# }
+# }
+# }
+GCRULE_RAW_PROTO_BYTESTRING = b"\x1a\x0c\n\x03\x08\xd2\t\n\x05\x12\x03\x08\xb9`"
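+# The round-trip test at the bottom of this file builds the equivalent GcRule
+# message and asserts that GcRule.serialize produces exactly these bytes.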
+INITIAL_VALUE = 123
+FINAL_VALUE = 456
+
+
+@pytest.fixture
+def default_msg():
+ return my_oneof_message.MyOneofMessage()
+
+
+@pytest.fixture
+def foo_msg():
+ return my_oneof_message.MyOneofMessage(foo=INITIAL_VALUE)
+
+
+def test_oneof_message_setattr_oneof_no_conflict(default_msg):
+ default_msg.foo = INITIAL_VALUE
+ default_msg.baz = INITIAL_VALUE
+ assert default_msg.foo == INITIAL_VALUE
+ assert default_msg.baz == INITIAL_VALUE
+ assert not default_msg.bar
+
+
+def test_oneof_message_setattr_conflict(default_msg, foo_msg):
+ with pytest.raises(ValueError):
+ foo_msg.bar = INITIAL_VALUE
+ assert foo_msg.foo == INITIAL_VALUE
+ assert not foo_msg.bar
+
+ default_msg.bar = INITIAL_VALUE
+ with pytest.raises(ValueError):
+ default_msg.foo = INITIAL_VALUE
+ assert default_msg.bar == INITIAL_VALUE
+ assert not default_msg.foo
+
+
+def test_oneof_message_setattr_oneof_same_oneof_field(default_msg, foo_msg):
+ foo_msg.foo = FINAL_VALUE
+ assert foo_msg.foo == FINAL_VALUE
+ assert not foo_msg.bar
+
+ default_msg.bar = INITIAL_VALUE
+ default_msg.bar = FINAL_VALUE
+ assert default_msg.bar == FINAL_VALUE
+ assert not default_msg.foo
+
+
+def test_oneof_message_setattr_oneof_delattr(foo_msg):
+ del foo_msg.foo
+ foo_msg.bar = INITIAL_VALUE
+ assert foo_msg.bar == INITIAL_VALUE
+ assert not foo_msg.foo
+
+
+def test_oneof_message_init_oneof_conflict(foo_msg):
+ with pytest.raises(ValueError):
+ my_oneof_message.MyOneofMessage(foo=INITIAL_VALUE, bar=INITIAL_VALUE)
+
+ with pytest.raises(ValueError):
+ my_oneof_message.MyOneofMessage(
+ {
+ "foo": INITIAL_VALUE,
+ "bar": INITIAL_VALUE,
+ }
+ )
+
+ with pytest.raises(ValueError):
+ my_oneof_message.MyOneofMessage(foo_msg._pb, bar=INITIAL_VALUE)
+
+ with pytest.raises(ValueError):
+ my_oneof_message.MyOneofMessage(foo_msg, bar=INITIAL_VALUE)
+
+
+def test_oneof_message_init_oneof_no_conflict(foo_msg):
+ msg = my_oneof_message.MyOneofMessage(foo=INITIAL_VALUE, baz=INITIAL_VALUE)
+ assert msg.foo == INITIAL_VALUE
+ assert msg.baz == INITIAL_VALUE
+ assert not msg.bar
+
+ msg = my_oneof_message.MyOneofMessage(
+ {
+ "foo": INITIAL_VALUE,
+ "baz": INITIAL_VALUE,
+ }
+ )
+ assert msg.foo == INITIAL_VALUE
+ assert msg.baz == INITIAL_VALUE
+ assert not msg.bar
+
+ msg = my_oneof_message.MyOneofMessage(foo_msg, baz=INITIAL_VALUE)
+ assert msg.foo == INITIAL_VALUE
+ assert msg.baz == INITIAL_VALUE
+ assert not msg.bar
+
+ msg = my_oneof_message.MyOneofMessage(foo_msg._pb, baz=INITIAL_VALUE)
+ assert msg.foo == INITIAL_VALUE
+ assert msg.baz == INITIAL_VALUE
+ assert not msg.bar
+
+
+def test_oneof_message_init_kwargs_override_same_field_oneof(foo_msg):
+    # Kwargs take precedence over the mapping, so this should be OK.
+ msg = my_oneof_message.MyOneofMessage(
+ {
+ "foo": INITIAL_VALUE,
+ },
+ foo=FINAL_VALUE,
+ )
+ assert msg.foo == FINAL_VALUE
+
+ msg = my_oneof_message.MyOneofMessage(foo_msg, foo=FINAL_VALUE)
+ assert msg.foo == FINAL_VALUE
+
+ msg = my_oneof_message.MyOneofMessage(foo_msg._pb, foo=FINAL_VALUE)
+ assert msg.foo == FINAL_VALUE
+
+
+def test_gcrule_serialize_deserialize():
+ test = GcRule(
+ intersection=GcRule.Intersection(
+ rules=[
+ GcRule(max_num_versions=1234),
+ GcRule(max_age=duration_pb2.Duration(seconds=12345)),
+ ]
+ )
+ )
+ assert GcRule.serialize(test) == GCRULE_RAW_PROTO_BYTESTRING
+ assert GcRule.deserialize(GCRULE_RAW_PROTO_BYTESTRING) == test
diff --git a/tests/unit/admin_overlay/test_restore_table.py b/tests/unit/admin_overlay/test_restore_table.py
new file mode 100644
index 000000000..23c6609e4
--- /dev/null
+++ b/tests/unit/admin_overlay/test_restore_table.py
@@ -0,0 +1,230 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+except ImportError: # pragma: NO COVER
+ import mock
+
+from google.longrunning import operations_pb2
+from google.rpc import status_pb2, code_pb2
+
+from google.api_core import operation, exceptions
+from google.api_core.operations_v1 import operations_client
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin, table
+from google.cloud.bigtable_admin_v2.overlay.types import restore_table
+
+import pytest
+
+
+# Set up the mock operations
+DEFAULT_MAX_POLL = 3
+RESTORE_TABLE_OPERATION_TABLE_NAME = "Test Table"
+RESTORE_TABLE_OPERATION_NAME = "test/restore_table"
+RESTORE_TABLE_OPERATION_METADATA = bigtable_table_admin.RestoreTableMetadata(
+ name=RESTORE_TABLE_OPERATION_TABLE_NAME,
+)
+OPTIMIZE_RESTORED_TABLE_OPERATION_NAME = "test/optimize_restore_table"
+OPTIMIZE_RESTORED_TABLE_METADATA = bigtable_table_admin.OptimizeRestoredTableMetadata(
+ name=RESTORE_TABLE_OPERATION_TABLE_NAME,
+)
+
+OPTIMIZE_RESTORED_TABLE_OPERATION_ID = "abcdefg"
+RESTORE_TABLE_OPERATION_FINISHED_RESPONSE = table.Table(
+ name=RESTORE_TABLE_OPERATION_TABLE_NAME,
+)
+RESTORE_TABLE_OPERATION_FINISHED_ERROR = status_pb2.Status(
+ code=code_pb2.DEADLINE_EXCEEDED, message="Deadline Exceeded"
+)
+
+
+def make_operation_proto(
+ name, done=False, metadata=None, response=None, error=None, **kwargs
+):
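+    # metadata and response are proto-plus messages; their raw protobuf (._pb) is
+    # packed into the Operation's Any fields, as the real operations API would return.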
+ operation_proto = operations_pb2.Operation(name=name, done=done, **kwargs)
+
+ if metadata is not None:
+ operation_proto.metadata.Pack(metadata._pb)
+
+ if response is not None:
+ operation_proto.response.Pack(response._pb)
+
+ if error is not None:
+ operation_proto.error.CopyFrom(error)
+
+ return operation_proto
+
+
+RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO = make_operation_proto(
+ name=RESTORE_TABLE_OPERATION_NAME,
+ done=False,
+ metadata=RESTORE_TABLE_OPERATION_METADATA,
+)
+
+OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO = make_operation_proto(
+ name=OPTIMIZE_RESTORED_TABLE_OPERATION_NAME,
+ metadata=OPTIMIZE_RESTORED_TABLE_METADATA,
+)
+
+
+# Set up the mock operation client
+def mock_restore_table_operation(
+ max_poll_count=DEFAULT_MAX_POLL, fail=False, has_optimize_operation=True
+):
+ client = mock.Mock(spec=operations_client.OperationsClient)
+
+ # Set up the polling
+ side_effect = [RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO] * (max_poll_count - 1)
+ finished_operation_metadata = bigtable_table_admin.RestoreTableMetadata()
+ bigtable_table_admin.RestoreTableMetadata.copy_from(
+ finished_operation_metadata, RESTORE_TABLE_OPERATION_METADATA
+ )
+ if has_optimize_operation:
+ finished_operation_metadata.optimize_table_operation_name = (
+ OPTIMIZE_RESTORED_TABLE_OPERATION_ID
+ )
+
+ if fail:
+ final_operation_proto = make_operation_proto(
+ name=RESTORE_TABLE_OPERATION_NAME,
+ done=True,
+ metadata=finished_operation_metadata,
+ error=RESTORE_TABLE_OPERATION_FINISHED_ERROR,
+ )
+ else:
+ final_operation_proto = make_operation_proto(
+ name=RESTORE_TABLE_OPERATION_NAME,
+ done=True,
+ metadata=finished_operation_metadata,
+ response=RESTORE_TABLE_OPERATION_FINISHED_RESPONSE,
+ )
+ side_effect.append(final_operation_proto)
+ refresh = mock.Mock(spec=["__call__"], side_effect=side_effect)
+ cancel = mock.Mock(spec=["__call__"])
+ future = operation.Operation(
+ RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO,
+ refresh,
+ cancel,
+ result_type=table.Table,
+ metadata_type=bigtable_table_admin.RestoreTableMetadata,
+ )
+
+    # Set up the optimize_restored_table_operation
+ client.get_operation.side_effect = [OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO]
+
+ return restore_table.RestoreTableOperation(client, future)
+
+
+def test_restore_table_operation_client_success_has_optimize():
+ restore_table_operation = mock_restore_table_operation()
+
+ restore_table_operation.result()
+ optimize_restored_table_operation = (
+ restore_table_operation.optimize_restored_table_operation()
+ )
+
+ assert isinstance(optimize_restored_table_operation, operation.Operation)
+ assert (
+ optimize_restored_table_operation._operation
+ == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO
+ )
+ restore_table_operation._operations_client.get_operation.assert_called_with(
+ name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID
+ )
+ restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL)
+
+
+def test_restore_table_operation_client_success_has_optimize_multiple_calls():
+ restore_table_operation = mock_restore_table_operation()
+
+ restore_table_operation.result()
+ optimize_restored_table_operation = (
+ restore_table_operation.optimize_restored_table_operation()
+ )
+
+ assert isinstance(optimize_restored_table_operation, operation.Operation)
+ assert (
+ optimize_restored_table_operation._operation
+ == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO
+ )
+ restore_table_operation._operations_client.get_operation.assert_called_with(
+ name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID
+ )
+ restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL)
+
+ restore_table_operation.optimize_restored_table_operation()
+ restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL)
+
+
+def test_restore_table_operation_success_has_optimize_call_before_done():
+ restore_table_operation = mock_restore_table_operation()
+
+ with pytest.raises(exceptions.GoogleAPIError):
+ restore_table_operation.optimize_restored_table_operation()
+
+ restore_table_operation._operations_client.get_operation.assert_not_called()
+
+
+def test_restore_table_operation_client_success_only_cache_after_finishing():
+ restore_table_operation = mock_restore_table_operation()
+
+ with pytest.raises(exceptions.GoogleAPIError):
+ restore_table_operation.optimize_restored_table_operation()
+
+ restore_table_operation.result()
+ optimize_restored_table_operation = (
+ restore_table_operation.optimize_restored_table_operation()
+ )
+
+ assert isinstance(optimize_restored_table_operation, operation.Operation)
+ assert (
+ optimize_restored_table_operation._operation
+ == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO
+ )
+ restore_table_operation._operations_client.get_operation.assert_called_with(
+ name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID
+ )
+ restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL)
+
+ restore_table_operation.optimize_restored_table_operation()
+ restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL)
+
+
+def test_restore_table_operation_success_no_optimize():
+ restore_table_operation = mock_restore_table_operation(has_optimize_operation=False)
+
+ restore_table_operation.result()
+ optimize_restored_table_operation = (
+ restore_table_operation.optimize_restored_table_operation()
+ )
+
+ assert optimize_restored_table_operation is None
+ restore_table_operation._operations_client.get_operation.assert_not_called()
+
+
+def test_restore_table_operation_exception():
+ restore_table_operation = mock_restore_table_operation(
+ fail=True, has_optimize_operation=False
+ )
+
+ with pytest.raises(exceptions.GoogleAPICallError):
+ restore_table_operation.result()
+
+ optimize_restored_table_operation = (
+ restore_table_operation.optimize_restored_table_operation()
+ )
+
+ assert optimize_restored_table_operation is None
+ restore_table_operation._operations_client.get_operation.assert_not_called()
diff --git a/tests/unit/data/_async/test__swappable_channel.py b/tests/unit/data/_async/test__swappable_channel.py
new file mode 100644
index 000000000..14fef2c85
--- /dev/null
+++ b/tests/unit/data/_async/test__swappable_channel.py
@@ -0,0 +1,135 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+except ImportError: # pragma: NO COVER
+ import mock # type: ignore
+
+import pytest
+from grpc import ChannelConnectivity
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if CrossSync.is_async:
+ from google.cloud.bigtable.data._async._swappable_channel import (
+ AsyncSwappableChannel as TargetType,
+ )
+else:
+ from google.cloud.bigtable.data._sync_autogen._swappable_channel import (
+ SwappableChannel as TargetType,
+ )
+
+
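+# Dotted module path where the CrossSync generator is expected to emit the
+# sync variant of this test module.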
+__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test__swappable_channel"
+
+
+@CrossSync.convert_class(sync_name="TestSwappableChannel")
+class TestAsyncSwappableChannel:
+ @staticmethod
+ @CrossSync.convert
+ def _get_target_class():
+ return TargetType
+
+ def _make_one(self, *args, **kwargs):
+ return self._get_target_class()(*args, **kwargs)
+
+ def test_ctor(self):
+ channel_fn = mock.Mock()
+ instance = self._make_one(channel_fn)
+ assert instance._channel_fn == channel_fn
+ channel_fn.assert_called_once_with()
+ assert instance._channel == channel_fn.return_value
+
+ def test_swap_channel(self):
+ channel_fn = mock.Mock()
+ instance = self._make_one(channel_fn)
+ old_channel = instance._channel
+ new_channel = object()
+ result = instance.swap_channel(new_channel)
+ assert result == old_channel
+ assert instance._channel == new_channel
+
+ def test_create_channel(self):
+ channel_fn = mock.Mock()
+ instance = self._make_one(channel_fn)
+ # reset mock from ctor call
+ channel_fn.reset_mock()
+ new_channel = instance.create_channel()
+ channel_fn.assert_called_once_with()
+ assert new_channel == channel_fn.return_value
+
+ @CrossSync.drop
+ def test_create_channel_async_interceptors_copied(self):
+ channel_fn = mock.Mock()
+ instance = self._make_one(channel_fn)
+ # reset mock from ctor call
+ channel_fn.reset_mock()
+ # mock out interceptors on original channel
+ instance._channel._unary_unary_interceptors = ["unary_unary"]
+ instance._channel._unary_stream_interceptors = ["unary_stream"]
+ instance._channel._stream_unary_interceptors = ["stream_unary"]
+ instance._channel._stream_stream_interceptors = ["stream_stream"]
+
+ new_channel = instance.create_channel()
+ channel_fn.assert_called_once_with()
+ assert new_channel == channel_fn.return_value
+ assert new_channel._unary_unary_interceptors == ["unary_unary"]
+ assert new_channel._unary_stream_interceptors == ["unary_stream"]
+ assert new_channel._stream_unary_interceptors == ["stream_unary"]
+ assert new_channel._stream_stream_interceptors == ["stream_stream"]
+
+ @pytest.mark.parametrize(
+ "method_name,args,kwargs",
+ [
+ ("unary_unary", (1,), {"kw": 2}),
+ ("unary_stream", (3,), {"kw": 4}),
+ ("stream_unary", (5,), {"kw": 6}),
+ ("stream_stream", (7,), {"kw": 8}),
+ ("get_state", (), {"try_to_connect": True}),
+ ],
+ )
+ def test_forwarded_methods(self, method_name, args, kwargs):
+ channel_fn = mock.Mock()
+ instance = self._make_one(channel_fn)
+ method = getattr(instance, method_name)
+ result = method(*args, **kwargs)
+ mock_method = getattr(channel_fn.return_value, method_name)
+ mock_method.assert_called_once_with(*args, **kwargs)
+ assert result == mock_method.return_value
+
+ @pytest.mark.parametrize(
+ "method_name,args,kwargs",
+ [
+ ("channel_ready", (), {}),
+ ("wait_for_state_change", (ChannelConnectivity.READY,), {}),
+ ],
+ )
+ @CrossSync.pytest
+ async def test_forwarded_async_methods(self, method_name, args, kwargs):
+ async def dummy_coro(*a, **k):
+ return mock.sentinel.result
+
+ channel = mock.Mock()
+ mock_method = getattr(channel, method_name)
+ mock_method.side_effect = dummy_coro
+
+ channel_fn = mock.Mock(return_value=channel)
+ instance = self._make_one(channel_fn)
+ method = getattr(instance, method_name)
+ result = await method(*args, **kwargs)
+
+ mock_method.assert_called_once_with(*args, **kwargs)
+ assert result == mock.sentinel.result
diff --git a/tests/unit/data/_async/test_client.py b/tests/unit/data/_async/test_client.py
index 5e7302d75..9f65d120b 100644
--- a/tests/unit/data/_async/test_client.py
+++ b/tests/unit/data/_async/test_client.py
@@ -26,6 +26,7 @@
from google.cloud.bigtable_v2.types import ReadRowsResponse
from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
from google.api_core import exceptions as core_exceptions
+from google.api_core import client_options
from google.cloud.bigtable.data.exceptions import InvalidChunk
from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete
from google.cloud.bigtable.data.mutations import DeleteAllFromRow
@@ -51,13 +52,29 @@
if CrossSync.is_async:
from google.api_core import grpc_helpers_async
from google.cloud.bigtable.data._async.client import TableAsync
+ from google.cloud.bigtable.data._async._swappable_channel import (
+ AsyncSwappableChannel,
+ )
+ from google.cloud.bigtable.data._async.metrics_interceptor import (
+ AsyncBigtableMetricsInterceptor,
+ )
CrossSync.add_mapping("grpc_helpers", grpc_helpers_async)
+ CrossSync.add_mapping("SwappableChannel", AsyncSwappableChannel)
+ CrossSync.add_mapping("MetricsInterceptor", AsyncBigtableMetricsInterceptor)
else:
from google.api_core import grpc_helpers
from google.cloud.bigtable.data._sync_autogen.client import Table # noqa: F401
+ from google.cloud.bigtable.data._sync_autogen._swappable_channel import (
+ SwappableChannel,
+ )
+ from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import (
+ BigtableMetricsInterceptor,
+ )
CrossSync.add_mapping("grpc_helpers", grpc_helpers)
+ CrossSync.add_mapping("SwappableChannel", SwappableChannel)
+ CrossSync.add_mapping("MetricsInterceptor", BigtableMetricsInterceptor)
__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_client"
@@ -105,6 +122,7 @@ async def test_ctor(self):
assert not client._active_instances
assert client._channel_refresh_task is not None
assert client.transport._credentials == expected_credentials
+ assert isinstance(client._metrics_interceptor, CrossSync.MetricsInterceptor)
await client.close()
@CrossSync.pytest
@@ -228,6 +246,7 @@ async def test__start_background_channel_refresh(self):
client, "_ping_and_warm_instances", CrossSync.Mock()
) as ping_and_warm:
client._emulator_host = None
+ client.transport._grpc_channel = CrossSync.SwappableChannel(mock.Mock)
client._start_background_channel_refresh()
assert client._channel_refresh_task is not None
assert isinstance(client._channel_refresh_task, CrossSync.Task)
@@ -383,44 +402,31 @@ async def test__manage_channel_ping_and_warm(self):
"""
_manage channel should call ping and warm internally
"""
- import time
import threading
- if CrossSync.is_async:
- from google.cloud.bigtable_v2.services.bigtable.transports.grpc_asyncio import (
- _LoggingClientAIOInterceptor as Interceptor,
- )
- else:
- from google.cloud.bigtable_v2.services.bigtable.transports.grpc import (
- _LoggingClientInterceptor as Interceptor,
- )
-
- client_mock = mock.Mock()
- client_mock.transport._interceptor = Interceptor()
- client_mock._is_closed.is_set.return_value = False
- client_mock._channel_init_time = time.monotonic()
- orig_channel = client_mock.transport.grpc_channel
+ client = self._make_client(project="project-id", use_emulator=True)
+ orig_channel = client.transport.grpc_channel
# should ping an warm all new channels, and old channels if sleeping
sleep_tuple = (
(asyncio, "sleep") if CrossSync.is_async else (threading.Event, "wait")
)
- with mock.patch.object(*sleep_tuple):
- # stop process after close is called
- orig_channel.close.side_effect = asyncio.CancelledError
- ping_and_warm = client_mock._ping_and_warm_instances = CrossSync.Mock()
+ with mock.patch.object(*sleep_tuple) as sleep_mock:
+            # stop the refresh loop after one full cycle
+ sleep_mock.side_effect = [None, asyncio.CancelledError]
+ ping_and_warm = client._ping_and_warm_instances = CrossSync.Mock()
# should ping and warm old channel then new if sleep > 0
try:
- await self._get_target_class()._manage_channel(client_mock, 10)
+ await client._manage_channel(10)
except asyncio.CancelledError:
pass
# should have called at loop start, and after replacement
assert ping_and_warm.call_count == 2
# should have replaced channel once
- assert client_mock.transport._grpc_channel != orig_channel
+ assert client.transport.grpc_channel._channel != orig_channel
# make sure new and old channels were warmed
called_with = [call[1]["channel"] for call in ping_and_warm.call_args_list]
assert orig_channel in called_with
- assert client_mock.transport.grpc_channel in called_with
+ assert client.transport.grpc_channel._channel in called_with
@CrossSync.pytest
@pytest.mark.parametrize(
@@ -438,8 +444,6 @@ async def test__manage_channel_sleeps(
import time
import random
- channel = mock.Mock()
- channel.close = CrossSync.Mock()
with mock.patch.object(random, "uniform") as uniform:
uniform.side_effect = lambda min_, max_: min_
with mock.patch.object(time, "time") as time_mock:
@@ -448,8 +452,7 @@ async def test__manage_channel_sleeps(
sleep.side_effect = [None for i in range(num_cycles - 1)] + [
asyncio.CancelledError
]
- client = self._make_client(project="project-id")
- client.transport._grpc_channel = channel
+ client = self._make_client(project="project-id", use_emulator=True)
with mock.patch.object(
client.transport, "create_channel", CrossSync.Mock
):
@@ -505,26 +508,27 @@ async def test__manage_channel_refresh(self, num_cycles):
expected_refresh = 0.5
grpc_lib = grpc.aio if CrossSync.is_async else grpc
new_channel = grpc_lib.insecure_channel("localhost:8080")
+ create_channel_mock = mock.Mock()
+ create_channel_mock.return_value = new_channel
+ refreshable_channel = CrossSync.SwappableChannel(create_channel_mock)
with mock.patch.object(CrossSync, "event_wait") as sleep:
sleep.side_effect = [None for i in range(num_cycles)] + [RuntimeError]
- with mock.patch.object(
- CrossSync.grpc_helpers, "create_channel"
- ) as create_channel:
- create_channel.return_value = new_channel
- client = self._make_client(project="project-id")
- create_channel.reset_mock()
- try:
- await client._manage_channel(
- refresh_interval_min=expected_refresh,
- refresh_interval_max=expected_refresh,
- grace_period=0,
- )
- except RuntimeError:
- pass
- assert sleep.call_count == num_cycles + 1
- assert create_channel.call_count == num_cycles
- await client.close()
+ client = self._make_client(project="project-id")
+ client.transport._grpc_channel = refreshable_channel
+ create_channel_mock.reset_mock()
+ sleep.reset_mock()
+ try:
+ await client._manage_channel(
+ refresh_interval_min=expected_refresh,
+ refresh_interval_max=expected_refresh,
+ grace_period=0,
+ )
+ except RuntimeError:
+ pass
+ assert sleep.call_count == num_cycles + 1
+ assert create_channel_mock.call_count == num_cycles
+ await client.close()
@CrossSync.pytest
async def test__register_instance(self):
@@ -542,7 +546,7 @@ async def test__register_instance(self):
client_mock._ping_and_warm_instances = CrossSync.Mock()
table_mock = mock.Mock()
await self._get_target_class()._register_instance(
- client_mock, "instance-1", table_mock
+ client_mock, "instance-1", table_mock.app_profile_id, id(table_mock)
)
# first call should start background refresh
assert client_mock._start_background_channel_refresh.call_count == 1
@@ -560,7 +564,7 @@ async def test__register_instance(self):
# next call should not call _start_background_channel_refresh again
table_mock2 = mock.Mock()
await self._get_target_class()._register_instance(
- client_mock, "instance-2", table_mock2
+ client_mock, "instance-2", table_mock2.app_profile_id, id(table_mock2)
)
assert client_mock._start_background_channel_refresh.call_count == 1
assert (
@@ -612,7 +616,7 @@ async def test__register_instance_duplicate(self):
)
# fake first registration
await self._get_target_class()._register_instance(
- client_mock, "instance-1", table_mock
+ client_mock, "instance-1", table_mock.app_profile_id, id(table_mock)
)
assert len(active_instances) == 1
assert expected_key == tuple(list(active_instances)[0])
@@ -622,7 +626,7 @@ async def test__register_instance_duplicate(self):
assert client_mock._ping_and_warm_instances.call_count == 1
# next call should do nothing
await self._get_target_class()._register_instance(
- client_mock, "instance-1", table_mock
+ client_mock, "instance-1", table_mock.app_profile_id, id(table_mock)
)
assert len(active_instances) == 1
assert expected_key == tuple(list(active_instances)[0])
@@ -664,7 +668,7 @@ async def test__register_instance_state(
for instance, profile in insert_instances:
table_mock.app_profile_id = profile
await self._get_target_class()._register_instance(
- client_mock, instance, table_mock
+ client_mock, instance, profile, id(table_mock)
)
assert len(active_instances) == len(expected_active)
assert len(instance_owners) == len(expected_owner_keys)
@@ -687,8 +691,8 @@ async def test__register_instance_state(
async def test__remove_instance_registration(self):
client = self._make_client(project="project-id")
table = mock.Mock()
- await client._register_instance("instance-1", table)
- await client._register_instance("instance-2", table)
+ await client._register_instance("instance-1", table.app_profile_id, id(table))
+ await client._register_instance("instance-2", table.app_profile_id, id(table))
assert len(client._active_instances) == 2
assert len(client._instance_owners.keys()) == 2
instance_1_path = client._gapic_client.instance_path(
@@ -703,13 +707,15 @@ async def test__remove_instance_registration(self):
assert list(client._instance_owners[instance_1_key])[0] == id(table)
assert len(client._instance_owners[instance_2_key]) == 1
assert list(client._instance_owners[instance_2_key])[0] == id(table)
- success = await client._remove_instance_registration("instance-1", table)
+ success = client._remove_instance_registration(
+ "instance-1", table.app_profile_id, id(table)
+ )
assert success
assert len(client._active_instances) == 1
assert len(client._instance_owners[instance_1_key]) == 0
assert len(client._instance_owners[instance_2_key]) == 1
assert client._active_instances == {instance_2_key}
- success = await client._remove_instance_registration("fake-key", table)
+ success = client._remove_instance_registration("fake-key", "profile", id(table))
assert not success
assert len(client._active_instances) == 1
await client.close()
@@ -1038,6 +1044,97 @@ def test_client_ctor_sync(self):
assert client.project == "project-id"
assert client._channel_refresh_task is None
+ @CrossSync.pytest
+ async def test_default_universe_domain(self):
+ """
+ When not passed, universe_domain should default to googleapis.com
+ """
+ async with self._make_client(project="project-id", credentials=None) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+ @CrossSync.pytest
+ async def test_custom_universe_domain(self):
+ """test with a customized universe domain value and emulator enabled"""
+ universe_domain = "test-universe.test"
+ options = client_options.ClientOptions(universe_domain=universe_domain)
+ async with self._make_client(
+ project="project_id",
+ client_options=options,
+ use_emulator=True,
+ credentials=None,
+ ) as client:
+ assert client.universe_domain == universe_domain
+ assert client.api_endpoint == f"bigtable.{universe_domain}"
+
+ @CrossSync.pytest
+ async def test_configured_universe_domain_matches_GDU(self):
+ """that configured universe domain succeeds with matched GDU credentials."""
+ universe_domain = "googleapis.com"
+ options = client_options.ClientOptions(universe_domain=universe_domain)
+ async with self._make_client(
+ project="project_id", client_options=options, credentials=None
+ ) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+ @CrossSync.pytest
+ async def test_credential_universe_domain_matches_GDU(self):
+ """Test with credentials"""
+ creds = AnonymousCredentials()
+ creds._universe_domain = "googleapis.com"
+ async with self._make_client(project="project_id", credentials=creds) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+ @CrossSync.pytest
+    async def test_anonymous_credential_universe_domain(self):
+        """Anonymous credentials should use the default universe domain."""
+ creds = AnonymousCredentials()
+ async with self._make_client(project="project_id", credentials=creds) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+ @CrossSync.pytest
+ async def test_configured_universe_domain_mismatched_credentials(self):
+ """Test that configured universe domain errors with mismatched universe
+ domain credentials.
+ """
+ universe_domain = "test-universe.test"
+ options = client_options.ClientOptions(universe_domain=universe_domain)
+ creds = AnonymousCredentials()
+ creds._universe_domain = "different-universe"
+ with pytest.raises(ValueError) as exc:
+ self._make_client(
+ project="project_id",
+ client_options=options,
+ use_emulator=False,
+ credentials=creds,
+ )
+ err_msg = (
+ f"The configured universe domain ({universe_domain}) does "
+ "not match the universe domain found in the credentials "
+ f"({creds.universe_domain}). If you haven't "
+ "configured the universe domain explicitly, `googleapis.com` "
+ "is the default."
+ )
+ assert exc.value.args[0] == err_msg
+
+ @CrossSync.pytest
+ async def test_configured_universe_domain_matches_credentials(self):
+ """Test that configured universe domain succeeds with matching universe
+ domain credentials.
+ """
+ universe_domain = "test-universe.test"
+ options = client_options.ClientOptions(universe_domain=universe_domain)
+ creds = AnonymousCredentials()
+ creds._universe_domain = universe_domain
+ async with self._make_client(
+ project="project_id", credentials=creds, client_options=options
+ ) as client:
+ assert client.universe_domain == universe_domain
+ assert client.api_endpoint == f"bigtable.{universe_domain}"
+
@CrossSync.convert_class("TestTable", add_mapping_for_name="TestTable")
class TestTableAsync:
@@ -1065,6 +1162,9 @@ def _make_one(
@CrossSync.pytest
async def test_ctor(self):
from google.cloud.bigtable.data._helpers import _WarmedInstanceKey
+ from google.cloud.bigtable.data._metrics import (
+ BigtableClientSideMetricsController,
+ )
expected_table_id = "table-id"
expected_instance_id = "instance-id"
@@ -1106,6 +1206,7 @@ async def test_ctor(self):
instance_key = _WarmedInstanceKey(table.instance_name, table.app_profile_id)
assert instance_key in client._active_instances
assert client._instance_owners[instance_key] == {id(table)}
+ assert isinstance(table._metrics, BigtableClientSideMetricsController)
assert table.default_operation_timeout == expected_operation_timeout
assert table.default_attempt_timeout == expected_attempt_timeout
assert (
@@ -1246,6 +1347,7 @@ def test_table_ctor_sync(self):
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
core_exceptions.Aborted,
+ core_exceptions.Cancelled,
],
),
(
@@ -1358,19 +1460,28 @@ async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_
# expect x-goog-request-params tag
assert metadata[0][0] == "x-goog-request-params"
routing_str = metadata[0][1]
- assert self._expected_routing_header(table) in routing_str
+ assert f"table_name={table.table_name}" in routing_str
if include_app_profile:
assert "app_profile_id=profile" in routing_str
else:
# empty app_profile_id should send empty string
assert "app_profile_id=" in routing_str
- @staticmethod
- def _expected_routing_header(table):
- """
- the expected routing header for this _ApiSurface type
- """
- return f"table_name={table.table_name}"
+ @CrossSync.pytest
+ async def test_close(self):
+ client = self._make_client()
+ table = self._make_one(client)
+ with mock.patch.object(
+ table._metrics, "close", mock.Mock()
+ ) as metric_close_mock:
+ with mock.patch.object(
+ client, "_remove_instance_registration"
+ ) as remove_mock:
+ await table.close()
+ remove_mock.assert_called_once_with(
+ table.instance_id, table.app_profile_id, id(table)
+ )
+ metric_close_mock.assert_called_once()
@CrossSync.convert_class(
@@ -1399,16 +1510,12 @@ def _make_one(
client, instance_id, table_id, view_id, app_profile_id, **kwargs
)
- @staticmethod
- def _expected_routing_header(view):
- """
- the expected routing header for this _ApiSurface type
- """
- return f"authorized_view_name={view.authorized_view_name}"
-
@CrossSync.pytest
async def test_ctor(self):
from google.cloud.bigtable.data._helpers import _WarmedInstanceKey
+ from google.cloud.bigtable.data._metrics import (
+ BigtableClientSideMetricsController,
+ )
expected_table_id = "table-id"
expected_instance_id = "instance-id"
@@ -1457,6 +1564,7 @@ async def test_ctor(self):
instance_key = _WarmedInstanceKey(view.instance_name, view.app_profile_id)
assert instance_key in client._active_instances
assert client._instance_owners[instance_key] == {id(view)}
+ assert isinstance(view._metrics, BigtableClientSideMetricsController)
assert view.default_operation_timeout == expected_operation_timeout
assert view.default_attempt_timeout == expected_attempt_timeout
assert (
@@ -1670,9 +1778,8 @@ async def test_read_rows_timeout(self, operation_timeout):
@pytest.mark.parametrize(
"per_request_t, operation_t, expected_num",
[
- (0.05, 0.08, 2),
- (0.05, 0.14, 3),
- (0.05, 0.24, 5),
+ (0.1, 0.19, 2),
+ (0.1, 0.29, 3),
],
)
@CrossSync.pytest
@@ -1758,7 +1865,6 @@ async def test_read_rows_retryable_error(self, exc_type):
@pytest.mark.parametrize(
"exc_type",
[
- core_exceptions.Cancelled,
core_exceptions.PreconditionFailed,
core_exceptions.NotFound,
core_exceptions.PermissionDenied,
diff --git a/tests/unit/data/_async/test_metrics_interceptor.py b/tests/unit/data/_async/test_metrics_interceptor.py
new file mode 100644
index 000000000..1593b8c99
--- /dev/null
+++ b/tests/unit/data/_async/test_metrics_interceptor.py
@@ -0,0 +1,336 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+from grpc import RpcError
+from grpc import ClientCallDetails
+
+from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric
+from google.cloud.bigtable.data._metrics.data_model import OperationState
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+# try/except added for compatibility with python < 3.8
+try:
+ from unittest import mock
+except ImportError: # pragma: NO COVER
+ import mock # type: ignore
+
+if CrossSync.is_async:
+ from google.cloud.bigtable.data._async.metrics_interceptor import (
+ AsyncBigtableMetricsInterceptor,
+ )
+else:
+ from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( # noqa: F401
+ BigtableMetricsInterceptor,
+ )
+
+
+__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_metrics_interceptor"
+
+
+@CrossSync.convert(replace_symbols={"__aiter__": "__iter__"})
+def _make_mock_stream_call(values, exc=None):
+ """
+ Create a mock call object that can be used for streaming calls
+ """
+ call = CrossSync.Mock()
+
+ async def gen():
+ for val in values:
+ yield val
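+ # after the provided values are exhausted, optionally raise to simulate a mid-stream failure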
+ if exc:
+ raise exc
+
+ call.__aiter__ = mock.Mock(return_value=gen())
+ return call
+
+
+@CrossSync.convert_class(sync_name="TestMetricsInterceptor")
+class TestMetricsInterceptorAsync:
+ @staticmethod
+ @CrossSync.convert(
+ replace_symbols={
+ "AsyncBigtableMetricsInterceptor": "BigtableMetricsInterceptor"
+ }
+ )
+ def _get_target_class():
+ return AsyncBigtableMetricsInterceptor
+
+ def _make_one(self, *args, **kwargs):
+ return self._get_target_class()(*args, **kwargs)
+
+ @CrossSync.pytest
+ async def test_unary_unary_interceptor_op_not_found(self):
+ """Test that interceptor calls continuation if op is not found"""
+ instance = self._make_one()
+ continuation = CrossSync.Mock()
+ details = ClientCallDetails()
+ details.metadata = []
+ request = mock.Mock()
+ await instance.intercept_unary_unary(continuation, details, request)
+ continuation.assert_called_once_with(details, request)
+
+ @CrossSync.pytest
+ async def test_unary_unary_interceptor_success(self):
+ """Test that interceptor handles successful unary-unary calls"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ ActiveOperationMetric._active_operation_context.set(op)
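+ # the interceptor is expected to pick up the active operation from this context variable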
+ continuation = CrossSync.Mock()
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")])
+ call.initial_metadata = CrossSync.Mock(return_value=[("c", "d")])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ result = await instance.intercept_unary_unary(continuation, details, request)
+ assert result == call
+ continuation.assert_called_once_with(details, request)
+ op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+ op.end_attempt_with_status.assert_not_called()
+
+ @CrossSync.pytest
+ async def test_unary_unary_interceptor_failure(self):
+ """test a failed call raising RpcError with metadata attached"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = RpcError("test")
+ exc.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")])
+ exc.initial_metadata = CrossSync.Mock(return_value=[("c", "d")])
+ continuation = CrossSync.Mock(side_effect=exc)
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ await instance.intercept_unary_unary(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+
+ @CrossSync.pytest
+ async def test_unary_unary_interceptor_failure_no_metadata(self):
+ """test with RpcError without metadata attached"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = RpcError("test")
+ continuation = CrossSync.Mock(side_effect=exc)
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")])
+ call.initial_metadata = CrossSync.Mock(return_value=[("c", "d")])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ await instance.intercept_unary_unary(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ op.add_response_metadata.assert_not_called()
+
+ @CrossSync.pytest
+ async def test_unary_unary_interceptor_failure_generic(self):
+ """test generic exception"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = ValueError("test")
+ continuation = CrossSync.Mock(side_effect=exc)
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")])
+ call.initial_metadata = CrossSync.Mock(return_value=[("c", "d")])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(ValueError) as e:
+ await instance.intercept_unary_unary(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ op.add_response_metadata.assert_not_called()
+
+ @CrossSync.pytest
+ async def test_unary_stream_interceptor_op_not_found(self):
+ """Test that interceptor calls continuation if op is not found"""
+ instance = self._make_one()
+ continuation = CrossSync.Mock()
+ details = ClientCallDetails()
+ details.metadata = []
+ request = mock.Mock()
+ await instance.intercept_unary_stream(continuation, details, request)
+ continuation.assert_called_once_with(details, request)
+
+ @CrossSync.pytest
+ async def test_unary_stream_interceptor_success(self):
+ """Test that interceptor handles successful unary-stream calls"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ op.start_time_ns = 0
+ op.first_response_latency = None
+ ActiveOperationMetric._active_operation_context.set(op)
+
+ continuation = CrossSync.Mock(return_value=_make_mock_stream_call([1, 2]))
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")])
+ call.initial_metadata = CrossSync.Mock(return_value=[("c", "d")])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ wrapper = await instance.intercept_unary_stream(continuation, details, request)
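+ # consuming the wrapped stream should record first-response latency and response metadata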
+ results = [val async for val in wrapper]
+ assert results == [1, 2]
+ continuation.assert_called_once_with(details, request)
+ assert op.first_response_latency_ns is not None
+ op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+ op.end_attempt_with_status.assert_not_called()
+
+ @CrossSync.pytest
+ async def test_unary_stream_interceptor_failure_mid_stream(self):
+ """Test that interceptor handles failures mid-stream"""
+ from grpc.aio import AioRpcError, Metadata
+
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ op.start_time_ns = 0
+ op.first_response_latency = None
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = AioRpcError(0, Metadata(), Metadata(("a", "b"), ("c", "d")))
+ continuation = CrossSync.Mock(return_value=_make_mock_stream_call([1], exc=exc))
+ details = ClientCallDetails()
+ request = mock.Mock()
+ wrapper = await instance.intercept_unary_stream(continuation, details, request)
+ with pytest.raises(AioRpcError) as e:
+ [val async for val in wrapper]
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ assert op.first_response_latency_ns is not None
+ op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+
+ @CrossSync.pytest
+ async def test_unary_stream_interceptor_failure_start_stream(self):
+ """Test that interceptor handles failures at start of stream with RpcError with metadata"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ op.start_time_ns = 0
+ op.first_response_latency = None
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = RpcError("test")
+ exc.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")])
+ exc.initial_metadata = CrossSync.Mock(return_value=[("c", "d")])
+
+ continuation = CrossSync.Mock()
+ continuation.side_effect = exc
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ await instance.intercept_unary_stream(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ assert op.first_response_latency_ns is not None
+ op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+
+ @CrossSync.pytest
+ async def test_unary_stream_interceptor_failure_start_stream_no_metadata(self):
+ """Test that interceptor handles failures at start of stream with RpcError with no metadata"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ op.start_time_ns = 0
+ op.first_response_latency = None
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = RpcError("test")
+
+ continuation = CrossSync.Mock()
+ continuation.side_effect = exc
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ await instance.intercept_unary_stream(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ assert op.first_response_latency_ns is not None
+ op.add_response_metadata.assert_not_called()
+
+ @CrossSync.pytest
+ async def test_unary_stream_interceptor_failure_start_stream_generic(self):
+ """Test that interceptor handles failures at start of stream with generic exception"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ op.start_time_ns = 0
+ op.first_response_latency = None
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = ValueError("test")
+
+ continuation = CrossSync.Mock()
+ continuation.side_effect = exc
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(ValueError) as e:
+ await instance.intercept_unary_stream(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ assert op.first_response_latency_ns is not None
+ op.add_response_metadata.assert_not_called()
+
+ @CrossSync.pytest
+ @pytest.mark.parametrize(
+ "initial_state", [OperationState.CREATED, OperationState.BETWEEN_ATTEMPTS]
+ )
+ async def test_unary_unary_interceptor_start_operation(self, initial_state):
+ """if called with a newly created operation, a new attempt should be started"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = initial_state
+ ActiveOperationMetric._active_operation_context.set(op)
+ continuation = CrossSync.Mock()
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync.Mock(return_value=[])
+ call.initial_metadata = CrossSync.Mock(return_value=[])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ await instance.intercept_unary_unary(continuation, details, request)
+ op.start_attempt.assert_called_once()
+
+ @CrossSync.pytest
+ @pytest.mark.parametrize(
+ "initial_state", [OperationState.CREATED, OperationState.BETWEEN_ATTEMPTS]
+ )
+ async def test_unary_stream_interceptor_start_operation(self, initial_state):
+ """if called with a newly created operation, a new attempt should be started"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = initial_state
+ ActiveOperationMetric._active_operation_context.set(op)
+
+ continuation = CrossSync.Mock(return_value=_make_mock_stream_call([1, 2]))
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync.Mock(return_value=[])
+ call.initial_metadata = CrossSync.Mock(return_value=[])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ await instance.intercept_unary_stream(continuation, details, request)
+ op.start_attempt.assert_called_once()
diff --git a/tests/unit/data/_async/test_mutations_batcher.py b/tests/unit/data/_async/test_mutations_batcher.py
index 29f2f1026..b139f31f1 100644
--- a/tests/unit/data/_async/test_mutations_batcher.py
+++ b/tests/unit/data/_async/test_mutations_batcher.py
@@ -1169,6 +1169,7 @@ def test__add_exceptions(self, limit, in_e, start_e, end_e):
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
core_exceptions.Aborted,
+ core_exceptions.Cancelled,
],
),
(
diff --git a/tests/unit/data/_metrics/__init__.py b/tests/unit/data/_metrics/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/unit/data/_metrics/test_data_model.py b/tests/unit/data/_metrics/test_data_model.py
new file mode 100644
index 000000000..93e73c9d8
--- /dev/null
+++ b/tests/unit/data/_metrics/test_data_model.py
@@ -0,0 +1,730 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import mock
+
+from google.cloud.bigtable.data._metrics.data_model import OperationState as State
+from google.cloud.bigtable_v2.types import ResponseParams
+
+
+class TestActiveOperationMetric:
+ def _make_one(self, *args, **kwargs):
+ from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric
+
+ return ActiveOperationMetric(*args, **kwargs)
+
+ @mock.patch("time.monotonic_ns")
+ def test_ctor_defaults(self, mock_monotonic_ns):
+ """
+ create an instance with default values
+ """
+ expected_timestamp = 123456789
+ mock_monotonic_ns.return_value = expected_timestamp
+ mock_type = mock.Mock()
+ metric = self._make_one(mock_type)
+ assert metric.op_type == mock_type
+ assert metric.start_time_ns == expected_timestamp
+ assert metric.active_attempt is None
+ assert metric.cluster_id is None
+ assert metric.zone is None
+ assert len(metric.completed_attempts) == 0
+ assert len(metric.handlers) == 0
+ assert metric.is_streaming is False
+ assert metric.flow_throttling_time_ns == 0
+ assert metric.state == State.CREATED
+
+ def test_ctor_explicit(self):
+ """
+ test with explicit arguments
+ """
+ expected_type = mock.Mock()
+ expected_start_time_ns = 7
+ expected_active_attempt = mock.Mock()
+ expected_cluster_id = "cluster"
+ expected_zone = "zone"
+ expected_completed_attempts = [mock.Mock()]
+ expected_state = State.COMPLETED
+ expected_handlers = [mock.Mock()]
+ expected_is_streaming = True
+ expected_flow_throttling = 12
+ metric = self._make_one(
+ op_type=expected_type,
+ start_time_ns=expected_start_time_ns,
+ active_attempt=expected_active_attempt,
+ cluster_id=expected_cluster_id,
+ zone=expected_zone,
+ state=expected_state,
+ completed_attempts=expected_completed_attempts,
+ handlers=expected_handlers,
+ is_streaming=expected_is_streaming,
+ flow_throttling_time_ns=expected_flow_throttling,
+ )
+ assert metric.op_type == expected_type
+ assert metric.start_time_ns == expected_start_time_ns
+ assert metric.active_attempt == expected_active_attempt
+ assert metric.cluster_id == expected_cluster_id
+ assert metric.zone == expected_zone
+ assert metric.completed_attempts == expected_completed_attempts
+ assert metric.state == expected_state
+ assert metric.handlers == expected_handlers
+ assert metric.is_streaming == expected_is_streaming
+ assert metric.flow_throttling_time_ns == expected_flow_throttling
+
+ def test_state_machine_w_methods(self):
+ """
+ Exercise the state machine by calling methods to move between states
+ """
+ metric = self._make_one(mock.Mock())
+ assert metric.state == State.CREATED
+ metric.start()
+ assert metric.state == State.CREATED
+ metric.start_attempt()
+ assert metric.state == State.ACTIVE_ATTEMPT
+ metric.end_attempt_with_status(Exception())
+ assert metric.state == State.BETWEEN_ATTEMPTS
+ metric.start_attempt()
+ assert metric.state == State.ACTIVE_ATTEMPT
+ metric.end_with_success()
+ assert metric.state == State.COMPLETED
+
+ def test_state_machine(self):
+ """
+ Exercise state machine by moving through states
+ """
+ metric = self._make_one(mock.Mock())
+ assert metric.state == State.CREATED
+ metric.start_attempt()
+ assert metric.state == State.ACTIVE_ATTEMPT
+ metric.end_attempt_with_status(0)
+ assert metric.state == State.BETWEEN_ATTEMPTS
+ metric.end_with_success()
+ assert metric.state == State.COMPLETED
+
+ @pytest.mark.parametrize(
+ "method,args,valid_states,error_method_name",
+ [
+ ("start", (), (State.CREATED,), None),
+ ("start_attempt", (), (State.CREATED, State.BETWEEN_ATTEMPTS), None),
+ ("add_response_metadata", ({},), (State.ACTIVE_ATTEMPT,), None),
+ ("end_attempt_with_status", (mock.Mock(),), (State.ACTIVE_ATTEMPT,), None),
+ (
+ "end_with_status",
+ (mock.Mock(),),
+ (
+ State.CREATED,
+ State.ACTIVE_ATTEMPT,
+ State.BETWEEN_ATTEMPTS,
+ ),
+ None,
+ ),
+ (
+ "end_with_success",
+ (),
+ (
+ State.CREATED,
+ State.ACTIVE_ATTEMPT,
+ State.BETWEEN_ATTEMPTS,
+ ),
+ "end_with_status",
+ ),
+ ],
+ ids=lambda x: x if isinstance(x, str) else "",
+ )
+ def test_error_invalid_states(self, method, args, valid_states, error_method_name):
+ """
+ each method only works for certain states. Make sure _handle_error is called for invalid states
+ """
+ cls = type(self._make_one(mock.Mock()))
+ invalid_states = set(State) - set(valid_states)
+ error_method_name = error_method_name or method
+ for state in invalid_states:
+ with mock.patch.object(cls, "_handle_error") as mock_handle_error:
+ mock_handle_error.return_value = None
+ metric = self._make_one(mock.Mock(), state=state)
+ return_obj = getattr(metric, method)(*args)
+ assert return_obj is None
+ assert mock_handle_error.call_count == 1
+ assert (
+ mock_handle_error.call_args[0][0]
+ == f"Invalid state for {error_method_name}: {state}"
+ )
+
+ @mock.patch("time.monotonic_ns")
+ def test_start(self, mock_monotonic_ns):
+ """
+ calling start on the operation should reset start_time_ns
+ """
+ expected_timestamp = 123456789
+ mock_monotonic_ns.return_value = expected_timestamp
+ orig_time = 0
+ metric = self._make_one(mock.Mock(), start_time_ns=orig_time)
+ assert metric.start_time_ns == 0
+ metric.start()
+ assert metric.start_time_ns != orig_time
+ assert metric.start_time_ns == expected_timestamp
+ # should remain in CREATED state after completing
+ assert metric.state == State.CREATED
+
+ @mock.patch("time.monotonic_ns")
+ def test_start_attempt(self, mock_monotonic_ns):
+ """
+ calling start_attempt should create a new empty attempt metric
+ """
+ from google.cloud.bigtable.data._metrics.data_model import ActiveAttemptMetric
+
+ expected_timestamp = 123456789
+ mock_monotonic_ns.return_value = expected_timestamp
+ metric = self._make_one(mock.Mock())
+ assert metric.active_attempt is None
+ metric.start_attempt()
+ assert isinstance(metric.active_attempt, ActiveAttemptMetric)
+ # make sure it was initialized with the correct values
+ assert metric.active_attempt.start_time_ns == expected_timestamp
+ assert metric.active_attempt.gfe_latency_ns is None
+ # should be in ACTIVE_ATTEMPT state after completing
+ assert metric.state == State.ACTIVE_ATTEMPT
+
+ def test_start_attempt_with_backoff_generator(self):
+ """
+ If operation has a backoff generator, it should be used to attach backoff
+ times to attempts
+ """
+ from google.cloud.bigtable.data._helpers import TrackedBackoffGenerator
+
+ generator = TrackedBackoffGenerator()
+ # pre-seed generator with expected values
+ generator.history = list(range(10))
+ metric = self._make_one(mock.Mock(), backoff_generator=generator)
+ metric.start_attempt()
+ assert len(metric.completed_attempts) == 0
+ # first attempt should always be 0
+ assert metric.active_attempt.backoff_before_attempt_ns == 0
+ # later attempts should have their attempt number as backoff time
+ for i in range(10):
+ metric.end_attempt_with_status(mock.Mock())
+ assert len(metric.completed_attempts) == i + 1
+ metric.start_attempt()
+ # expect the backoff to be converted from seconds to ns
+ assert metric.active_attempt.backoff_before_attempt_ns == (i * 1e9)
+
+ @pytest.mark.parametrize(
+ "start_cluster,start_zone,metadata_proto,end_cluster,end_zone",
+ [
+ (None, None, None, None, None),
+ ("orig_cluster", "orig_zone", None, "orig_cluster", "orig_zone"),
+ (None, None, ResponseParams(), None, None),
+ (
+ "orig_cluster",
+ "orig_zone",
+ ResponseParams(),
+ "orig_cluster",
+ "orig_zone",
+ ),
+ (
+ None,
+ None,
+ ResponseParams(cluster_id="test-cluster", zone_id="us-central1-b"),
+ "test-cluster",
+ "us-central1-b",
+ ),
+ (
+ None,
+ "filled",
+ ResponseParams(cluster_id="cluster", zone_id="zone"),
+ "cluster",
+ "zone",
+ ),
+ (None, "filled", ResponseParams(cluster_id="cluster"), "cluster", "filled"),
+ (None, "filled", ResponseParams(zone_id="zone"), None, "zone"),
+ (
+ "filled",
+ None,
+ ResponseParams(cluster_id="cluster", zone_id="zone"),
+ "cluster",
+ "zone",
+ ),
+ ("filled", None, ResponseParams(cluster_id="cluster"), "cluster", None),
+ ("filled", None, ResponseParams(zone_id="zone"), "filled", "zone"),
+ ],
+ )
+ def test_add_response_metadata_cbt_header(
+ self, start_cluster, start_zone, metadata_proto, end_cluster, end_zone
+ ):
+ """
+ calling add_response_metadata should update fields based on grpc response metadata
+ The x-goog-ext-425905942-bin field contains cluster and zone info
+ """
+ import grpc
+
+ cls = type(self._make_one(mock.Mock()))
+ with mock.patch.object(cls, "_handle_error") as mock_handle_error:
+ metric = self._make_one(
+ mock.Mock(),
+ cluster_id=start_cluster,
+ zone=start_zone,
+ state=State.ACTIVE_ATTEMPT,
+ )
+ metric.active_attempt = mock.Mock()
+ metric.active_attempt.gfe_latency_ns = None
+ metadata = grpc.aio.Metadata()
+ if metadata_proto is not None:
+ metadata["x-goog-ext-425905942-bin"] = ResponseParams.serialize(
+ metadata_proto
+ )
+ metric.add_response_metadata(metadata)
+ assert metric.cluster_id == end_cluster
+ assert metric.zone == end_zone
+ # should remain in ACTIVE_ATTEMPT state after completing
+ assert metric.state == State.ACTIVE_ATTEMPT
+ # no errors encountered
+ assert mock_handle_error.call_count == 0
+ # gfe latency should not be touched
+ assert metric.active_attempt.gfe_latency_ns is None
+
+ @pytest.mark.parametrize(
+ "metadata_field",
+ [
+ b"bad-input",
+ "cluster zone", # expect bytes
+ ],
+ )
+ def test_add_response_metadata_cbt_header_w_error(self, metadata_field):
+ """
+ If the x-goog-ext-425905942-bin field is present, but not structured properly,
+ _handle_error should be called
+
+ Extra fields should not result in a parsing error
+ """
+ import grpc
+
+ cls = type(self._make_one(mock.Mock()))
+ with mock.patch.object(cls, "_handle_error") as mock_handle_error:
+ metric = self._make_one(mock.Mock(), state=State.ACTIVE_ATTEMPT)
+ metric.cluster_id = None
+ metric.zone = None
+ metric.active_attempt = mock.Mock()
+ metadata = grpc.aio.Metadata()
+ metadata["x-goog-ext-425905942-bin"] = metadata_field
+ metric.add_response_metadata(metadata)
+ # should remain in ACTIVE_ATTEMPT state after completing
+ assert metric.state == State.ACTIVE_ATTEMPT
+ # the decode failure should be reported through _handle_error exactly once
+ assert mock_handle_error.call_count == 1
+ assert (
+ "Failed to decode x-goog-ext-425905942-bin metadata:"
+ in mock_handle_error.call_args[0][0]
+ )
+ assert str(metadata_field) in mock_handle_error.call_args[0][0]
+
+ @pytest.mark.parametrize(
+ "metadata_field,expected_latency_ns",
+ [
+ (None, None),
+ ("gfet4t7; dur=1000", 1000e6),
+ ("gfet4t7; dur=1000.0", 1000e6),
+ ("gfet4t7; dur=1000.1", 1000.1e6),
+ ("gcp; dur=15, gfet4t7; dur=300", 300e6),
+ ("gfet4t7;dur=350,gcp;dur=12", 350e6),
+ ("ignore_megfet4t7;dur=90ignore_me", 90e6),
+ ("gfet4t7;dur=2000", 2000e6),
+ ("gfet4t7; dur=0.001", 1000),
+ ("gfet4t7; dur=0.000001", 1),
+ ("gfet4t7; dur=0.0000001", 0), # below recording resolution
+ ("gfet4t7; dur=0", 0),
+ ("gfet4t7; dur=empty", None),
+ ("gfet4t7;", None),
+ ("", None),
+ ],
+ )
+ def test_add_response_metadata_server_timing_header(
+ self, metadata_field, expected_latency_ns
+ ):
+ """
+ calling add_response_metadata should update fields based on grpc response metadata
+ The server-timing field contains gfe latency info
+ """
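+ # server-timing reports dur in milliseconds; expected values are the equivalent nanoseconds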
+ import grpc
+
+ cls = type(self._make_one(mock.Mock()))
+ with mock.patch.object(cls, "_handle_error") as mock_handle_error:
+ metric = self._make_one(mock.Mock(), state=State.ACTIVE_ATTEMPT)
+ metric.active_attempt = mock.Mock()
+ metric.active_attempt.gfe_latency_ns = None
+ metadata = grpc.aio.Metadata()
+ if metadata_field:
+ metadata["server-timing"] = metadata_field
+ metric.add_response_metadata(metadata)
+ if metric.active_attempt.gfe_latency_ns is None:
+ assert expected_latency_ns is None
+ else:
+ assert metric.active_attempt.gfe_latency_ns == int(expected_latency_ns)
+ # should remain in ACTIVE_ATTEMPT state after completing
+ assert metric.state == State.ACTIVE_ATTEMPT
+ # no errors encountered
+ assert mock_handle_error.call_count == 0
+ # cluster and zone should not be touched
+ assert metric.cluster_id is None
+ assert metric.zone is None
+
+ @mock.patch("time.monotonic_ns")
+ def test_end_attempt_with_status(self, mock_monotonic_ns):
+ """
+ ending the attempt should:
+ - add one to completed_attempts
+ - reset active_attempt to None
+ - update state
+ - notify handlers
+ """
+ expected_mock_time = 123456789
+ mock_monotonic_ns.return_value = expected_mock_time
+ expected_start_time = 1
+ expected_status = object()
+ expected_gfe_latency_ns = 5
+ expected_app_blocking = 12
+ expected_backoff = 2
+ handlers = [mock.Mock(), mock.Mock()]
+
+ metric = self._make_one(mock.Mock(), handlers=handlers)
+ assert metric.active_attempt is None
+ assert len(metric.completed_attempts) == 0
+ metric.start_attempt()
+ metric.active_attempt.start_time_ns = expected_start_time
+ metric.active_attempt.gfe_latency_ns = expected_gfe_latency_ns
+ metric.active_attempt.application_blocking_time_ns = expected_app_blocking
+ metric.active_attempt.backoff_before_attempt_ns = expected_backoff
+ metric.end_attempt_with_status(expected_status)
+ assert len(metric.completed_attempts) == 1
+ got_attempt = metric.completed_attempts[0]
+ expected_duration = expected_mock_time - expected_start_time
+ assert got_attempt.duration_ns == expected_duration
+ assert got_attempt.end_status == expected_status
+ assert got_attempt.gfe_latency_ns == expected_gfe_latency_ns
+ assert got_attempt.application_blocking_time_ns == expected_app_blocking
+ assert got_attempt.backoff_before_attempt_ns == expected_backoff
+ # state should be changed to BETWEEN_ATTEMPTS
+ assert metric.state == State.BETWEEN_ATTEMPTS
+ # check handlers
+ for h in handlers:
+ assert h.on_attempt_complete.call_count == 1
+ assert h.on_attempt_complete.call_args[0][0] == got_attempt
+ assert h.on_attempt_complete.call_args[0][1] == metric
+
+ def test_end_attempt_with_status_w_exception(self):
+ """
+ exception inputs should be converted to grpc status objects
+ """
+ input_status = ValueError("test")
+ expected_status = object()
+
+ metric = self._make_one(mock.Mock())
+ metric.start_attempt()
+ with mock.patch.object(
+ metric, "_exc_to_status", return_value=expected_status
+ ) as mock_exc_to_status:
+ metric.end_attempt_with_status(input_status)
+ assert mock_exc_to_status.call_count == 1
+ assert mock_exc_to_status.call_args[0][0] == input_status
+ assert metric.completed_attempts[0].end_status == expected_status
+
+ @mock.patch("time.monotonic_ns")
+ def test_end_attempt_with_negative_duration_ns(self, mock_monotonic_ns):
+ """
+ If duration_ns is negative, it should be set to 0 and _handle_error should be called
+ """
+ cls = type(self._make_one(mock.Mock()))
+ with mock.patch.object(cls, "_handle_error") as mock_handle_error:
+ metric = self._make_one(mock.Mock())
+ metric.start_attempt()
+ metric.active_attempt.start_time_ns = 100
+ mock_monotonic_ns.return_value = 50 # Simulate time going backwards
+ metric.end_attempt_with_status(mock.Mock())
+
+ assert mock_handle_error.call_count == 1
+ assert (
+ "received negative value for duration"
+ in mock_handle_error.call_args[0][0]
+ )
+ assert metric.completed_attempts[0].duration_ns == 0
+
+ @mock.patch("time.monotonic_ns")
+ def test_end_with_status(self, mock_monotonic_ns):
+ """
+ ending the operation should:
+ - end active attempt
+ - mark operation as completed
+ - update handlers
+ """
+ from google.cloud.bigtable.data._metrics.data_model import ActiveAttemptMetric
+
+ expected_mock_time = 123456789
+ mock_monotonic_ns.return_value = expected_mock_time
+ expected_attempt_start_time = 0
+ expected_attempt_gfe_latency_ns = 5
+ expected_flow_time = 16
+
+ expected_first_response_latency_ns = 9
+ expected_status = object()
+ expected_type = object()
+ expected_start_time = 1
+ expected_cluster = object()
+ expected_zone = object()
+ is_streaming = object()
+
+ handlers = [mock.Mock(), mock.Mock()]
+ metric = self._make_one(
+ expected_type,
+ handlers=handlers,
+ start_time_ns=expected_start_time,
+ state=State.ACTIVE_ATTEMPT,
+ )
+ metric.cluster_id = expected_cluster
+ metric.zone = expected_zone
+ metric.is_streaming = is_streaming
+ metric.flow_throttling_time_ns = expected_flow_time
+ metric.first_response_latency_ns = expected_first_response_latency_ns
+ attempt = ActiveAttemptMetric(
+ start_time_ns=expected_attempt_start_time,
+ gfe_latency_ns=expected_attempt_gfe_latency_ns,
+ )
+ metric.active_attempt = attempt
+ metric.end_with_status(expected_status)
+ # test that ActiveOperation was updated to terminal state
+ assert metric.state == State.COMPLETED
+ assert metric.active_attempt is None
+ assert len(metric.completed_attempts) == 1
+ # check that finalized operation was passed to handlers
+ for h in handlers:
+ assert h.on_operation_complete.call_count == 1
+ assert len(h.on_operation_complete.call_args[0]) == 1
+ called_with = h.on_operation_complete.call_args[0][0]
+ assert called_with.op_type == expected_type
+ expected_duration = expected_mock_time - expected_start_time
+ assert called_with.duration_ns == expected_duration
+ assert called_with.final_status == expected_status
+ assert called_with.cluster_id == expected_cluster
+ assert called_with.zone == expected_zone
+ assert called_with.is_streaming == is_streaming
+ assert called_with.flow_throttling_time_ns == expected_flow_time
+ assert (
+ called_with.first_response_latency_ns
+ == expected_first_response_latency_ns
+ )
+ # check the attempt
+ assert len(called_with.completed_attempts) == 1
+ final_attempt = called_with.completed_attempts[0]
+ assert final_attempt.gfe_latency_ns == expected_attempt_gfe_latency_ns
+ assert final_attempt.end_status == expected_status
+ expected_duration = expected_mock_time - expected_attempt_start_time
+ assert final_attempt.duration_ns == expected_duration
+
+ @mock.patch("time.monotonic_ns")
+ def test_end_with_negative_duration_ns(self, mock_monotonic_ns):
+ """
+ If operation duration_ns is negative, it should be set to 0 and _handle_error should be called
+ """
+ cls = type(self._make_one(mock.Mock()))
+ with mock.patch.object(cls, "_handle_error") as mock_handle_error:
+ metric = self._make_one(mock.Mock(), handlers=[mock.Mock()])
+ metric.start_time_ns = 100
+ mock_monotonic_ns.return_value = 50 # Simulate time going backwards
+ metric.end_with_status(mock.Mock())
+
+ assert mock_handle_error.call_count == 1
+ assert (
+ "received negative value for duration"
+ in mock_handle_error.call_args[0][0]
+ )
+ final_op = metric.handlers[0].on_operation_complete.call_args[0][0]
+ assert final_op.duration_ns == 0
+
+ def test_end_with_status_w_exception(self):
+ """
+ exception inputs should be converted to grpc status objects
+ """
+ input_status = ValueError("test")
+ expected_status = object()
+ handlers = [mock.Mock()]
+
+ metric = self._make_one(mock.Mock(), handlers=handlers)
+ metric.start_attempt()
+ with mock.patch.object(
+ metric, "_exc_to_status", return_value=expected_status
+ ) as mock_exc_to_status:
+ metric.end_with_status(input_status)
+ assert mock_exc_to_status.call_count == 1
+ assert mock_exc_to_status.call_args[0][0] == input_status
+ assert metric.completed_attempts[0].end_status == expected_status
+ final_op = handlers[0].on_operation_complete.call_args[0][0]
+ assert final_op.final_status == expected_status
+
+ def test_end_with_status_with_default_cluster_zone(self):
+ """
+ ending the operation should use default cluster and zone if not set
+ """
+ from google.cloud.bigtable.data._metrics.data_model import (
+ DEFAULT_CLUSTER_ID,
+ DEFAULT_ZONE,
+ )
+
+ handlers = [mock.Mock()]
+ metric = self._make_one(mock.Mock(), handlers=handlers)
+ assert metric.cluster_id is None
+ assert metric.zone is None
+ metric.end_with_status(mock.Mock())
+ assert metric.state == State.COMPLETED
+ # check that finalized operation was passed to handlers
+ for h in handlers:
+ assert h.on_operation_complete.call_count == 1
+ called_with = h.on_operation_complete.call_args[0][0]
+ assert called_with.cluster_id == DEFAULT_CLUSTER_ID
+ assert called_with.zone == DEFAULT_ZONE
+
+ def test_end_with_success(self):
+ """
+ end with success should be a pass-through helper for end_with_status
+ """
+ from grpc import StatusCode
+
+ inner_result = object()
+
+ metric = self._make_one(mock.Mock())
+ with mock.patch.object(metric, "end_with_status") as mock_end_with_status:
+ mock_end_with_status.return_value = inner_result
+ got_result = metric.end_with_success()
+ assert mock_end_with_status.call_count == 1
+ assert mock_end_with_status.call_args[0][0] == StatusCode.OK
+ assert got_result is inner_result
+
+ def test_end_on_empty_operation(self):
+ """
+ Should be able to end an operation without any attempts
+ """
+ from grpc import StatusCode
+
+ handlers = [mock.Mock()]
+ metric = self._make_one(mock.Mock(), handlers=handlers)
+ metric.end_with_success()
+ assert metric.state == State.COMPLETED
+ final_op = handlers[0].on_operation_complete.call_args[0][0]
+ assert final_op.final_status == StatusCode.OK
+ assert final_op.completed_attempts == []
+
+ def test__exc_to_status(self):
+ """
+ Should return grpc_status_code if grpc error, otherwise UNKNOWN
+
+ If BigtableExceptionGroup, use the most recent exception in the group
+ """
+ from grpc import StatusCode
+ from google.api_core import exceptions as core_exc
+ from google.cloud.bigtable.data import exceptions as bt_exc
+
+ cls = type(self._make_one(object()))
+ # unknown for non-grpc errors
+ assert cls._exc_to_status(ValueError()) == StatusCode.UNKNOWN
+ assert cls._exc_to_status(RuntimeError()) == StatusCode.UNKNOWN
+ # grpc status code for grpc errors
+ assert (
+ cls._exc_to_status(core_exc.InvalidArgument("msg"))
+ == StatusCode.INVALID_ARGUMENT
+ )
+ assert cls._exc_to_status(core_exc.NotFound("msg")) == StatusCode.NOT_FOUND
+ assert (
+ cls._exc_to_status(core_exc.AlreadyExists("msg"))
+ == StatusCode.ALREADY_EXISTS
+ )
+ assert (
+ cls._exc_to_status(core_exc.PermissionDenied("msg"))
+ == StatusCode.PERMISSION_DENIED
+ )
+ cause_exc = core_exc.AlreadyExists("msg")
+ w_cause = core_exc.DeadlineExceeded("msg")
+ w_cause.__cause__ = cause_exc
+ assert cls._exc_to_status(w_cause) == StatusCode.DEADLINE_EXCEEDED
+ # use cause if available
+ w_cause = ValueError("msg")
+ w_cause.__cause__ = cause_exc
+ cause_exc.grpc_status_code = object()
+ custom_excs = [
+ bt_exc.FailedMutationEntryError(1, mock.Mock(), cause=cause_exc),
+ bt_exc.FailedQueryShardError(1, {}, cause=cause_exc),
+ w_cause,
+ ]
+ for exc in custom_excs:
+ assert cls._exc_to_status(exc) == cause_exc.grpc_status_code, exc
+ # extract most recent exception for bigtable exception groups
+ exc_groups = [
+ bt_exc._BigtableExceptionGroup("", [ValueError(), cause_exc]),
+ bt_exc.RetryExceptionGroup([RuntimeError(), cause_exc]),
+ bt_exc.ShardedReadRowsExceptionGroup(
+ [bt_exc.FailedQueryShardError(1, {}, cause=cause_exc)], [], 2
+ ),
+ bt_exc.MutationsExceptionGroup(
+ [bt_exc.FailedMutationEntryError(1, mock.Mock(), cause=cause_exc)], 2
+ ),
+ ]
+ for exc in exc_groups:
+ assert cls._exc_to_status(exc) == cause_exc.grpc_status_code, exc
+
+ def test__handle_error(self):
+ """
+ handle_error should write log
+ """
+ input_message = "test message"
+ expected_message = f"Error in Bigtable Metrics: {input_message}"
+ with mock.patch(
+ "google.cloud.bigtable.data._metrics.data_model.LOGGER"
+ ) as logger_mock:
+ type(self._make_one(object()))._handle_error(input_message)
+ assert logger_mock.warning.call_count == 1
+ assert logger_mock.warning.call_args[0][0] == expected_message
+ assert len(logger_mock.warning.call_args[0]) == 1
+
+ @pytest.mark.asyncio
+ async def test_context_manager(self):
+ """
+ Should implement context manager protocol
+ """
+ metric = self._make_one(object())
+ with mock.patch.object(metric, "end_with_success") as end_with_success_mock:
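+ # forward to end_with_status so the metric still transitions to COMPLETED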
+ end_with_success_mock.side_effect = lambda: metric.end_with_status(object())
+ with metric as context:
+ assert context == metric
+ # inside context manager, still active
+ assert end_with_success_mock.call_count == 0
+ assert metric.state == State.CREATED
+ # outside context manager, should be ended
+ assert end_with_success_mock.call_count == 1
+ assert metric.state == State.COMPLETED
+
+ @pytest.mark.asyncio
+ async def test_context_manager_exception(self):
+ """
+ Exception within context manager causes end_with_status to be called with error
+ """
+ expected_exc = ValueError("expected")
+ metric = self._make_one(object())
+ with mock.patch.object(metric, "end_with_status") as end_with_status_mock:
+ try:
+ with metric:
+ # inside context manager, still active
+ assert end_with_status_mock.call_count == 0
+ assert metric.state == State.CREATED
+ raise expected_exc
+ except ValueError as e:
+ assert e == expected_exc
+ # outside context manager, should be ended
+ assert end_with_status_mock.call_count == 1
+ assert end_with_status_mock.call_args[0][0] == expected_exc
diff --git a/tests/unit/data/_metrics/test_metrics_controller.py b/tests/unit/data/_metrics/test_metrics_controller.py
new file mode 100644
index 000000000..125c2be1c
--- /dev/null
+++ b/tests/unit/data/_metrics/test_metrics_controller.py
@@ -0,0 +1,96 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+
+class TestBigtableClientSideMetricsController:
+ def _make_one(self, *args, **kwargs):
+ from google.cloud.bigtable.data._metrics import (
+ BigtableClientSideMetricsController,
+ )
+
+ return BigtableClientSideMetricsController(*args, **kwargs)
+
+ def test_ctor_defaults(self):
+ """
+ should create an instance with no handlers by default
+ """
+ instance = self._make_one()
+ assert len(instance.handlers) == 0
+
+ def test_ctor_custom_handlers(self):
+ """
+ if handlers are passed to init, use those instead
+ """
+ custom_handler = object()
+ custom_interceptor = object()
+ controller = self._make_one(custom_interceptor, handlers=[custom_handler])
+ assert controller.interceptor == custom_interceptor
+ assert len(controller.handlers) == 1
+ assert controller.handlers[0] is custom_handler
+
+ def test_add_handler(self):
+ """
+ New handlers should be added to list
+ """
+ controller = self._make_one(handlers=[object()])
+ initial_handler_count = len(controller.handlers)
+ new_handler = object()
+ controller.add_handler(new_handler)
+ assert len(controller.handlers) == initial_handler_count + 1
+ assert controller.handlers[-1] is new_handler
+
+ def test_create_operation_mock(self):
+ """
+ All args should be passed through, as well as the handlers
+ """
+ from google.cloud.bigtable.data._metrics import ActiveOperationMetric
+
+ controller = self._make_one(handlers=[object()])
+ arg = object()
+ kwargs = {"a": 1, "b": 2}
+ with mock.patch(
+ "google.cloud.bigtable.data._metrics.ActiveOperationMetric.__init__"
+ ) as mock_op:
+ mock_op.return_value = None
+ op = controller.create_operation(arg, **kwargs)
+ assert isinstance(op, ActiveOperationMetric)
+ assert mock_op.call_count == 1
+ mock_op.assert_called_with(arg, **kwargs, handlers=controller.handlers)
+
+ def test_create_operation(self):
+ from google.cloud.bigtable.data._metrics import ActiveOperationMetric
+
+ handler = object()
+ expected_type = object()
+ expected_is_streaming = True
+ expected_zone = object()
+ controller = self._make_one(handlers=[handler])
+ op = controller.create_operation(
+ expected_type, is_streaming=expected_is_streaming, zone=expected_zone
+ )
+ assert isinstance(op, ActiveOperationMetric)
+ assert op.op_type is expected_type
+ assert op.is_streaming is expected_is_streaming
+ assert op.zone is expected_zone
+ assert len(op.handlers) == 1
+ assert op.handlers[0] is handler
+
+ def test_close(self):
+ handlers = [mock.Mock() for _ in range(3)]
+ controller = self._make_one(handlers=handlers)
+ controller.close()
+ for handler in handlers:
+ handler.close.assert_called_once()
diff --git a/tests/unit/data/_metrics/test_tracked_retry.py b/tests/unit/data/_metrics/test_tracked_retry.py
new file mode 100644
index 000000000..39713dc69
--- /dev/null
+++ b/tests/unit/data/_metrics/test_tracked_retry.py
@@ -0,0 +1,232 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import inspect
+import mock
+import sys
+from grpc import StatusCode
+from google.api_core import exceptions as core_exceptions
+from google.api_core.retry import RetryFailureReason
+import google.api_core.retry as retry_module
+
+
+class TestTrackRetryableError:
+ def _call_fut(self, operation):
+ from google.cloud.bigtable.data._metrics.tracked_retry import (
+ _track_retryable_error,
+ )
+
+ return _track_retryable_error(operation)
+
+ def test_basic_exception(self):
+ """should call operation.end_attempt_with_status with the exception for basic exceptions."""
+ operation = mock.Mock()
+ wrapper = self._call_fut(operation)
+
+ exc = RuntimeError("test")
+ wrapper(exc)
+
+ operation.end_attempt_with_status.assert_called_once_with(exc)
+
+ def test_mutate_rows_incomplete(self):
+ """should call operation.end_attempt_with_status with StatusCode.OK for _MutateRowsIncomplete exceptions."""
+ from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete
+
+ operation = mock.Mock()
+ wrapper = self._call_fut(operation)
+
+ exc = _MutateRowsIncomplete("test")
+ wrapper(exc)
+
+ operation.end_attempt_with_status.assert_called_once_with(StatusCode.OK)
+
+ def test_rpc_error_metadata(self):
+ """should extract and add metadata from GoogleAPICallError."""
+ operation = mock.Mock()
+ wrapper = self._call_fut(operation)
+
+ rpc_error = mock.Mock()
+ rpc_error.trailing_metadata.return_value = (("key1", "val1"),)
+ rpc_error.initial_metadata.return_value = (("key2", "val2"),)
+
+ exc = core_exceptions.GoogleAPICallError("test", errors=[rpc_error])
+ wrapper(exc)
+
+ operation.add_response_metadata.assert_called_once_with(
+ {"key1": "val1", "key2": "val2"}
+ )
+ operation.end_attempt_with_status.assert_called_once_with(exc)
+
+ def test_metadata_error_ignored(self):
+ """should ignore errors during metadata collection."""
+ operation = mock.Mock()
+ operation.add_response_metadata.side_effect = RuntimeError("metadata error")
+ wrapper = self._call_fut(operation)
+
+ rpc_error = mock.Mock()
+ rpc_error.trailing_metadata.return_value = ()
+ rpc_error.initial_metadata.return_value = ()
+ exc = core_exceptions.GoogleAPICallError("test", errors=[rpc_error])
+
+ # should not raise
+ wrapper(exc)
+
+ operation.end_attempt_with_status.assert_called_once_with(exc)
+
+
+class TestTrackTerminalError:
+ def _call_fut(self, operation, factory):
+ from google.cloud.bigtable.data._metrics.tracked_retry import (
+ _track_terminal_error,
+ )
+
+ return _track_terminal_error(operation, factory)
+
+ def test_basic_pass_through(self):
+ """should call the exception_factory and end the operation with its result."""
+ operation = mock.Mock()
+ factory = mock.Mock()
+ expected_exc = RuntimeError("source")
+ expected_cause = RuntimeError("cause")
+ factory.return_value = (expected_exc, expected_cause)
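+ # exception_factory yields an (exception, cause) pair that the wrapper should return unchanged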
+
+ wrapper = self._call_fut(operation, factory)
+
+ exc_list = [RuntimeError("attempt1")]
+ reason = RetryFailureReason.TIMEOUT
+ timeout_val = 1.0
+
+ result = wrapper(exc_list, reason, timeout_val)
+
+ assert result == (expected_exc, expected_cause)
+ factory.assert_called_once_with(exc_list, reason, timeout_val)
+ operation.end_with_status.assert_called_once_with(expected_exc)
+
+ def test_timeout_active_attempt(self):
+ """should end the active attempt if the operation fails on timeout."""
+ from google.cloud.bigtable.data._metrics import OperationState
+
+ operation = mock.Mock()
+ operation.state = OperationState.ACTIVE_ATTEMPT
+ factory = mock.Mock()
+ factory.return_value = (RuntimeError("timeout"), None)
+
+ wrapper = self._call_fut(operation, factory)
+
+ last_exc = RuntimeError("last attempt error")
+ exc_list = [last_exc]
+
+ wrapper(exc_list, RetryFailureReason.TIMEOUT, 1.0)
+
+ # expect call to end_attempt_with_status via the _track_retryable_error logic
+ operation.end_attempt_with_status.assert_called_once_with(last_exc)
+ operation.end_with_status.assert_called_once()
+
+ def test_rpc_error_metadata(self):
+ """should extract and add metadata from GoogleAPICallError in terminal errors."""
+ operation = mock.Mock()
+ factory = mock.Mock()
+
+ rpc_error = mock.Mock()
+ rpc_error.trailing_metadata.return_value = (("k", "v"),)
+ rpc_error.initial_metadata.return_value = ()
+ source_exc = core_exceptions.GoogleAPICallError("test", errors=[rpc_error])
+
+ factory.return_value = (source_exc, None)
+
+ wrapper = self._call_fut(operation, factory)
+ wrapper([], RetryFailureReason.NON_RETRYABLE_ERROR, None)
+
+ operation.add_response_metadata.assert_called_once_with({"k": "v"})
+ operation.end_with_status.assert_called_once_with(source_exc)
+
+
+class TestTrackedRetry:
+ def _call_fut(self, **kwargs):
+ from google.cloud.bigtable.data._metrics.tracked_retry import tracked_retry
+
+ return tracked_retry(**kwargs)
+
+ def test_call_args(self):
+ """should correctly pass arguments to the retry_fn."""
+ operation = mock.Mock()
+ retry_fn = mock.Mock()
+ retry_fn.return_value = "result"
+
+ result = self._call_fut(retry_fn=retry_fn, operation=operation, other_arg=123)
+
+ assert result == "result"
+ retry_fn.assert_called_once()
+ call_kwargs = retry_fn.call_args[1]
+
+ assert call_kwargs["sleep_generator"] == operation.backoff_generator
+ assert "on_error" in call_kwargs
+ assert "exception_factory" in call_kwargs
+ assert call_kwargs["other_arg"] == 123
+
+ def test_tracked_retry_wraps_components(self):
+ """should wrap on_error and exception_factory with tracking logic."""
+ from google.cloud.bigtable.data._metrics import tracked_retry
+
+ module = sys.modules[tracked_retry.__module__]
+
+ with mock.patch.object(module, "_track_retryable_error") as mock_track_retry:
+ with mock.patch.object(
+ module, "_track_terminal_error"
+ ) as mock_track_terminal:
+ operation = mock.Mock()
+ retry_fn = mock.Mock()
+ custom_factory = mock.Mock()
+
+ self._call_fut(
+ retry_fn=retry_fn,
+ operation=operation,
+ exception_factory=custom_factory,
+ arg=1,
+ )
+
+ mock_track_retry.assert_called_once_with(operation)
+ mock_track_terminal.assert_called_once_with(operation, custom_factory)
+
+ retry_fn.assert_called_once_with(
+ sleep_generator=operation.backoff_generator,
+ on_error=mock_track_retry.return_value,
+ exception_factory=mock_track_terminal.return_value,
+ arg=1,
+ )
+
+ @pytest.mark.parametrize(
+ "fn_name,type_verifier",
+ [
+ ("retry_target", callable),
+ ("retry_target_stream", inspect.isgenerator),
+ ("retry_target_async", inspect.iscoroutine),
+ ("retry_target_stream_async", inspect.isasyncgen),
+ ],
+ )
+ def test_wrapping_api_core(self, fn_name, type_verifier):
+ """Test building tracked retry from different supported retry functions"""
+ from google.cloud.bigtable.data._metrics import ActiveOperationMetric
+
+ operation = ActiveOperationMetric("type")
+ fn = getattr(retry_module, fn_name)
+ tracked_retry = self._call_fut(
+ retry_fn=fn,
+ operation=operation,
+ target=mock.Mock(),
+ timeout=None,
+ predicate=lambda x: False,
+ )
+ assert type_verifier(tracked_retry)
diff --git a/tests/unit/data/_sync_autogen/test__swappable_channel.py b/tests/unit/data/_sync_autogen/test__swappable_channel.py
new file mode 100644
index 000000000..04f3f61c8
--- /dev/null
+++ b/tests/unit/data/_sync_autogen/test__swappable_channel.py
@@ -0,0 +1,100 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# try/except added for compatibility with python < 3.8
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+import pytest
+from grpc import ChannelConnectivity
+from google.cloud.bigtable.data._sync_autogen._swappable_channel import (
+ SwappableChannel as TargetType,
+)
+
+
+class TestSwappableChannel:
+ @staticmethod
+ def _get_target_class():
+ return TargetType
+
+ def _make_one(self, *args, **kwargs):
+ return self._get_target_class()(*args, **kwargs)
+
+ def test_ctor(self):
+ channel_fn = mock.Mock()
+ instance = self._make_one(channel_fn)
+ assert instance._channel_fn == channel_fn
+ channel_fn.assert_called_once_with()
+ assert instance._channel == channel_fn.return_value
+
+ def test_swap_channel(self):
+ channel_fn = mock.Mock()
+ instance = self._make_one(channel_fn)
+ old_channel = instance._channel
+ new_channel = object()
+ result = instance.swap_channel(new_channel)
+ assert result == old_channel
+ assert instance._channel == new_channel
+
+ def test_create_channel(self):
+ channel_fn = mock.Mock()
+ instance = self._make_one(channel_fn)
+ channel_fn.reset_mock()
+ new_channel = instance.create_channel()
+ channel_fn.assert_called_once_with()
+ assert new_channel == channel_fn.return_value
+
+ @pytest.mark.parametrize(
+ "method_name,args,kwargs",
+ [
+ ("unary_unary", (1,), {"kw": 2}),
+ ("unary_stream", (3,), {"kw": 4}),
+ ("stream_unary", (5,), {"kw": 6}),
+ ("stream_stream", (7,), {"kw": 8}),
+ ("get_state", (), {"try_to_connect": True}),
+ ],
+ )
+ def test_forwarded_methods(self, method_name, args, kwargs):
+ channel_fn = mock.Mock()
+ instance = self._make_one(channel_fn)
+ method = getattr(instance, method_name)
+ result = method(*args, **kwargs)
+ mock_method = getattr(channel_fn.return_value, method_name)
+ mock_method.assert_called_once_with(*args, **kwargs)
+ assert result == mock_method.return_value
+
+ @pytest.mark.parametrize(
+ "method_name,args,kwargs",
+ [
+ ("channel_ready", (), {}),
+ ("wait_for_state_change", (ChannelConnectivity.READY,), {}),
+ ],
+ )
+ def test_forwarded_async_methods(self, method_name, args, kwargs):
+ def dummy_coro(*a, **k):
+ return mock.sentinel.result
+
+ channel = mock.Mock()
+ mock_method = getattr(channel, method_name)
+ mock_method.side_effect = dummy_coro
+ channel_fn = mock.Mock(return_value=channel)
+ instance = self._make_one(channel_fn)
+ method = getattr(instance, method_name)
+ result = method(*args, **kwargs)
+ mock_method.assert_called_once_with(*args, **kwargs)
+ assert result == mock.sentinel.result
diff --git a/tests/unit/data/_sync_autogen/test_client.py b/tests/unit/data/_sync_autogen/test_client.py
index 38866c9dd..54be1f17c 100644
--- a/tests/unit/data/_sync_autogen/test_client.py
+++ b/tests/unit/data/_sync_autogen/test_client.py
@@ -25,6 +25,7 @@
from google.cloud.bigtable_v2.types import ReadRowsResponse
from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
from google.api_core import exceptions as core_exceptions
+from google.api_core import client_options
from google.cloud.bigtable.data.exceptions import InvalidChunk
from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete
from google.cloud.bigtable.data.mutations import DeleteAllFromRow
@@ -45,8 +46,14 @@
str_val,
)
from google.api_core import grpc_helpers
+from google.cloud.bigtable.data._sync_autogen._swappable_channel import SwappableChannel
+from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import (
+ BigtableMetricsInterceptor,
+)
CrossSync._Sync_Impl.add_mapping("grpc_helpers", grpc_helpers)
+CrossSync._Sync_Impl.add_mapping("SwappableChannel", SwappableChannel)
+CrossSync._Sync_Impl.add_mapping("MetricsInterceptor", BigtableMetricsInterceptor)
@CrossSync._Sync_Impl.add_mapping_decorator("TestBigtableDataClient")
@@ -82,6 +89,9 @@ def test_ctor(self):
assert not client._active_instances
assert client._channel_refresh_task is not None
assert client.transport._credentials == expected_credentials
+ assert isinstance(
+ client._metrics_interceptor, CrossSync._Sync_Impl.MetricsInterceptor
+ )
client.close()
def test_ctor_super_inits(self):
@@ -181,6 +191,9 @@ def test__start_background_channel_refresh(self):
client, "_ping_and_warm_instances", CrossSync._Sync_Impl.Mock()
) as ping_and_warm:
client._emulator_host = None
+ client.transport._grpc_channel = CrossSync._Sync_Impl.SwappableChannel(
+ mock.Mock
+ )
client._start_background_channel_refresh()
assert client._channel_refresh_task is not None
assert isinstance(client._channel_refresh_task, CrossSync._Sync_Impl.Task)
@@ -296,36 +309,29 @@ def test__manage_channel_first_sleep(
def test__manage_channel_ping_and_warm(self):
"""_manage channel should call ping and warm internally"""
- import time
import threading
- from google.cloud.bigtable_v2.services.bigtable.transports.grpc import (
- _LoggingClientInterceptor as Interceptor,
- )
- client_mock = mock.Mock()
- client_mock.transport._interceptor = Interceptor()
- client_mock._is_closed.is_set.return_value = False
- client_mock._channel_init_time = time.monotonic()
- orig_channel = client_mock.transport.grpc_channel
+ client = self._make_client(project="project-id", use_emulator=True)
+ orig_channel = client.transport.grpc_channel
sleep_tuple = (
(asyncio, "sleep")
if CrossSync._Sync_Impl.is_async
else (threading.Event, "wait")
)
- with mock.patch.object(*sleep_tuple):
- orig_channel.close.side_effect = asyncio.CancelledError
+ with mock.patch.object(*sleep_tuple) as sleep_mock:
+ sleep_mock.side_effect = [None, asyncio.CancelledError]
ping_and_warm = (
- client_mock._ping_and_warm_instances
+ client._ping_and_warm_instances
) = CrossSync._Sync_Impl.Mock()
try:
- self._get_target_class()._manage_channel(client_mock, 10)
+ client._manage_channel(10)
except asyncio.CancelledError:
pass
assert ping_and_warm.call_count == 2
- assert client_mock.transport._grpc_channel != orig_channel
+ assert client.transport.grpc_channel._channel != orig_channel
called_with = [call[1]["channel"] for call in ping_and_warm.call_args_list]
assert orig_channel in called_with
- assert client_mock.transport.grpc_channel in called_with
+ assert client.transport.grpc_channel._channel in called_with
@pytest.mark.parametrize(
"refresh_interval, num_cycles, expected_sleep",
@@ -335,8 +341,6 @@ def test__manage_channel_sleeps(self, refresh_interval, num_cycles, expected_sle
import time
import random
- channel = mock.Mock()
- channel.close = CrossSync._Sync_Impl.Mock()
with mock.patch.object(random, "uniform") as uniform:
uniform.side_effect = lambda min_, max_: min_
with mock.patch.object(time, "time") as time_mock:
@@ -345,8 +349,7 @@ def test__manage_channel_sleeps(self, refresh_interval, num_cycles, expected_sle
sleep.side_effect = [None for i in range(num_cycles - 1)] + [
asyncio.CancelledError
]
- client = self._make_client(project="project-id")
- client.transport._grpc_channel = channel
+ client = self._make_client(project="project-id", use_emulator=True)
with mock.patch.object(
client.transport, "create_channel", CrossSync._Sync_Impl.Mock
):
@@ -399,25 +402,26 @@ def test__manage_channel_refresh(self, num_cycles):
expected_refresh = 0.5
grpc_lib = grpc.aio if CrossSync._Sync_Impl.is_async else grpc
new_channel = grpc_lib.insecure_channel("localhost:8080")
+ create_channel_mock = mock.Mock()
+ create_channel_mock.return_value = new_channel
+ refreshable_channel = CrossSync._Sync_Impl.SwappableChannel(create_channel_mock)
with mock.patch.object(CrossSync._Sync_Impl, "event_wait") as sleep:
sleep.side_effect = [None for i in range(num_cycles)] + [RuntimeError]
- with mock.patch.object(
- CrossSync._Sync_Impl.grpc_helpers, "create_channel"
- ) as create_channel:
- create_channel.return_value = new_channel
- client = self._make_client(project="project-id")
- create_channel.reset_mock()
- try:
- client._manage_channel(
- refresh_interval_min=expected_refresh,
- refresh_interval_max=expected_refresh,
- grace_period=0,
- )
- except RuntimeError:
- pass
- assert sleep.call_count == num_cycles + 1
- assert create_channel.call_count == num_cycles
- client.close()
+ client = self._make_client(project="project-id")
+ client.transport._grpc_channel = refreshable_channel
+ create_channel_mock.reset_mock()
+ sleep.reset_mock()
+ try:
+ client._manage_channel(
+ refresh_interval_min=expected_refresh,
+ refresh_interval_max=expected_refresh,
+ grace_period=0,
+ )
+ except RuntimeError:
+ pass
+ assert sleep.call_count == num_cycles + 1
+ assert create_channel_mock.call_count == num_cycles
+ client.close()
def test__register_instance(self):
"""test instance registration"""
@@ -431,7 +435,7 @@ def test__register_instance(self):
client_mock._ping_and_warm_instances = CrossSync._Sync_Impl.Mock()
table_mock = mock.Mock()
self._get_target_class()._register_instance(
- client_mock, "instance-1", table_mock
+ client_mock, "instance-1", table_mock.app_profile_id, id(table_mock)
)
assert client_mock._start_background_channel_refresh.call_count == 1
expected_key = ("prefix/instance-1", table_mock.app_profile_id)
@@ -442,7 +446,7 @@ def test__register_instance(self):
client_mock._channel_refresh_task = mock.Mock()
table_mock2 = mock.Mock()
self._get_target_class()._register_instance(
- client_mock, "instance-2", table_mock2
+ client_mock, "instance-2", table_mock2.app_profile_id, id(table_mock2)
)
assert client_mock._start_background_channel_refresh.call_count == 1
assert (
@@ -481,7 +485,7 @@ def test__register_instance_duplicate(self):
table_mock = mock.Mock()
expected_key = ("prefix/instance-1", table_mock.app_profile_id)
self._get_target_class()._register_instance(
- client_mock, "instance-1", table_mock
+ client_mock, "instance-1", table_mock.app_profile_id, id(table_mock)
)
assert len(active_instances) == 1
assert expected_key == tuple(list(active_instances)[0])
@@ -489,7 +493,7 @@ def test__register_instance_duplicate(self):
assert expected_key == tuple(list(instance_owners)[0])
assert client_mock._ping_and_warm_instances.call_count == 1
self._get_target_class()._register_instance(
- client_mock, "instance-1", table_mock
+ client_mock, "instance-1", table_mock.app_profile_id, id(table_mock)
)
assert len(active_instances) == 1
assert expected_key == tuple(list(active_instances)[0])
@@ -526,7 +530,7 @@ def test__register_instance_state(
for instance, profile in insert_instances:
table_mock.app_profile_id = profile
self._get_target_class()._register_instance(
- client_mock, instance, table_mock
+ client_mock, instance, profile, id(table_mock)
)
assert len(active_instances) == len(expected_active)
assert len(instance_owners) == len(expected_owner_keys)
@@ -548,8 +552,8 @@ def test__register_instance_state(
def test__remove_instance_registration(self):
client = self._make_client(project="project-id")
table = mock.Mock()
- client._register_instance("instance-1", table)
- client._register_instance("instance-2", table)
+ client._register_instance("instance-1", table.app_profile_id, id(table))
+ client._register_instance("instance-2", table.app_profile_id, id(table))
assert len(client._active_instances) == 2
assert len(client._instance_owners.keys()) == 2
instance_1_path = client._gapic_client.instance_path(
@@ -564,13 +568,15 @@ def test__remove_instance_registration(self):
assert list(client._instance_owners[instance_1_key])[0] == id(table)
assert len(client._instance_owners[instance_2_key]) == 1
assert list(client._instance_owners[instance_2_key])[0] == id(table)
- success = client._remove_instance_registration("instance-1", table)
+ success = client._remove_instance_registration(
+ "instance-1", table.app_profile_id, id(table)
+ )
assert success
assert len(client._active_instances) == 1
assert len(client._instance_owners[instance_1_key]) == 0
assert len(client._instance_owners[instance_2_key]) == 1
assert client._active_instances == {instance_2_key}
- success = client._remove_instance_registration("fake-key", table)
+ success = client._remove_instance_registration("fake-key", "profile", id(table))
assert not success
assert len(client._active_instances) == 1
client.close()
@@ -836,6 +842,80 @@ def test_context_manager(self):
close_mock.assert_called_once()
true_close()
+ def test_default_universe_domain(self):
+ """When not passed, universe_domain should default to googleapis.com"""
+ with self._make_client(project="project-id", credentials=None) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+ def test_custom_universe_domain(self):
+ """test with a customized universe domain value and emulator enabled"""
+ universe_domain = "test-universe.test"
+ options = client_options.ClientOptions(universe_domain=universe_domain)
+ with self._make_client(
+ project="project_id",
+ client_options=options,
+ use_emulator=True,
+ credentials=None,
+ ) as client:
+ assert client.universe_domain == universe_domain
+ assert client.api_endpoint == f"bigtable.{universe_domain}"
+
+ def test_configured_universe_domain_matches_GDU(self):
+        """Test that a configured universe domain succeeds with matching GDU credentials."""
+ universe_domain = "googleapis.com"
+ options = client_options.ClientOptions(universe_domain=universe_domain)
+ with self._make_client(
+ project="project_id", client_options=options, credentials=None
+ ) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+ def test_credential_universe_domain_matches_GDU(self):
+        """Test that credentials whose universe domain matches the GDU are accepted"""
+ creds = AnonymousCredentials()
+ creds._universe_domain = "googleapis.com"
+ with self._make_client(project="project_id", credentials=creds) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+    def test_anonymous_credential_universe_domain(self):
+        """Anonymous credentials should use the default universe domain"""
+ creds = AnonymousCredentials()
+ with self._make_client(project="project_id", credentials=creds) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+ def test_configured_universe_domain_mismatched_credentials(self):
+ """Test that configured universe domain errors with mismatched universe
+ domain credentials."""
+ universe_domain = "test-universe.test"
+ options = client_options.ClientOptions(universe_domain=universe_domain)
+ creds = AnonymousCredentials()
+ creds._universe_domain = "different-universe"
+ with pytest.raises(ValueError) as exc:
+ self._make_client(
+ project="project_id",
+ client_options=options,
+ use_emulator=False,
+ credentials=creds,
+ )
+ err_msg = f"The configured universe domain ({universe_domain}) does not match the universe domain found in the credentials ({creds.universe_domain}). If you haven't configured the universe domain explicitly, `googleapis.com` is the default."
+ assert exc.value.args[0] == err_msg
+
+ def test_configured_universe_domain_matches_credentials(self):
+ """Test that configured universe domain succeeds with matching universe
+ domain credentials."""
+ universe_domain = "test-universe.test"
+ options = client_options.ClientOptions(universe_domain=universe_domain)
+ creds = AnonymousCredentials()
+ creds._universe_domain = universe_domain
+ with self._make_client(
+ project="project_id", credentials=creds, client_options=options
+ ) as client:
+ assert client.universe_domain == universe_domain
+ assert client.api_endpoint == f"bigtable.{universe_domain}"
+
@CrossSync._Sync_Impl.add_mapping_decorator("TestTable")
class TestTable:
@@ -860,6 +940,9 @@ def _make_one(
def test_ctor(self):
from google.cloud.bigtable.data._helpers import _WarmedInstanceKey
+ from google.cloud.bigtable.data._metrics import (
+ BigtableClientSideMetricsController,
+ )
expected_table_id = "table-id"
expected_instance_id = "instance-id"
@@ -900,6 +983,7 @@ def test_ctor(self):
instance_key = _WarmedInstanceKey(table.instance_name, table.app_profile_id)
assert instance_key in client._active_instances
assert client._instance_owners[instance_key] == {id(table)}
+ assert isinstance(table._metrics, BigtableClientSideMetricsController)
assert table.default_operation_timeout == expected_operation_timeout
assert table.default_attempt_timeout == expected_attempt_timeout
assert (
@@ -990,6 +1074,7 @@ def test_ctor_invalid_timeout_values(self):
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
core_exceptions.Aborted,
+ core_exceptions.Cancelled,
],
),
(
@@ -1085,16 +1170,26 @@ def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_fn):
assert len(metadata) == 1
assert metadata[0][0] == "x-goog-request-params"
routing_str = metadata[0][1]
- assert self._expected_routing_header(table) in routing_str
+ assert f"table_name={table.table_name}" in routing_str
if include_app_profile:
assert "app_profile_id=profile" in routing_str
else:
assert "app_profile_id=" in routing_str
- @staticmethod
- def _expected_routing_header(table):
- """the expected routing header for this _ApiSurface type"""
- return f"table_name={table.table_name}"
+ def test_close(self):
+ client = self._make_client()
+ table = self._make_one(client)
+ with mock.patch.object(
+ table._metrics, "close", mock.Mock()
+ ) as metric_close_mock:
+ with mock.patch.object(
+ client, "_remove_instance_registration"
+ ) as remove_mock:
+ table.close()
+ remove_mock.assert_called_once_with(
+ table.instance_id, table.app_profile_id, id(table)
+ )
+ metric_close_mock.assert_called_once()
@CrossSync._Sync_Impl.add_mapping_decorator("TestAuthorizedView")
@@ -1120,13 +1215,11 @@ def _make_one(
client, instance_id, table_id, view_id, app_profile_id, **kwargs
)
- @staticmethod
- def _expected_routing_header(view):
- """the expected routing header for this _ApiSurface type"""
- return f"authorized_view_name={view.authorized_view_name}"
-
def test_ctor(self):
from google.cloud.bigtable.data._helpers import _WarmedInstanceKey
+ from google.cloud.bigtable.data._metrics import (
+ BigtableClientSideMetricsController,
+ )
expected_table_id = "table-id"
expected_instance_id = "instance-id"
@@ -1174,6 +1267,7 @@ def test_ctor(self):
instance_key = _WarmedInstanceKey(view.instance_name, view.app_profile_id)
assert instance_key in client._active_instances
assert client._instance_owners[instance_key] == {id(view)}
+ assert isinstance(view._metrics, BigtableClientSideMetricsController)
assert view.default_operation_timeout == expected_operation_timeout
assert view.default_attempt_timeout == expected_attempt_timeout
assert (
@@ -1369,8 +1463,7 @@ def test_read_rows_timeout(self, operation_timeout):
)
@pytest.mark.parametrize(
- "per_request_t, operation_t, expected_num",
- [(0.05, 0.08, 2), (0.05, 0.14, 3), (0.05, 0.24, 5)],
+ "per_request_t, operation_t, expected_num", [(0.1, 0.19, 2), (0.1, 0.29, 3)]
)
def test_read_rows_attempt_timeout(self, per_request_t, operation_t, expected_num):
"""Ensures that the attempt_timeout is respected and that the number of
@@ -1444,7 +1537,6 @@ def test_read_rows_retryable_error(self, exc_type):
@pytest.mark.parametrize(
"exc_type",
[
- core_exceptions.Cancelled,
core_exceptions.PreconditionFailed,
core_exceptions.NotFound,
core_exceptions.PermissionDenied,
diff --git a/tests/unit/data/_sync_autogen/test_metrics_interceptor.py b/tests/unit/data/_sync_autogen/test_metrics_interceptor.py
new file mode 100644
index 000000000..c4efcc5b9
--- /dev/null
+++ b/tests/unit/data/_sync_autogen/test_metrics_interceptor.py
@@ -0,0 +1,307 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+import pytest
+from grpc import RpcError
+from grpc import ClientCallDetails
+from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric
+from google.cloud.bigtable.data._metrics.data_model import OperationState
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import (
+ BigtableMetricsInterceptor,
+)
+
+
+def _make_mock_stream_call(values, exc=None):
+ """Create a mock call object that can be used for streaming calls"""
+ call = CrossSync._Sync_Impl.Mock()
+
+ def gen():
+ for val in values:
+ yield val
+ if exc:
+ raise exc
+
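+    # expose the generator through __iter__ so the mock call can be iterated
+    # like a streaming gRPC response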
+ call.__iter__ = mock.Mock(return_value=gen())
+ return call
+
+
+class TestMetricsInterceptor:
+ @staticmethod
+ def _get_target_class():
+ return BigtableMetricsInterceptor
+
+ def _make_one(self, *args, **kwargs):
+ return self._get_target_class()(*args, **kwargs)
+
+ def test_unary_unary_interceptor_op_not_found(self):
+        """Test that interceptor calls continuation if op is not found"""
+ instance = self._make_one()
+ continuation = CrossSync._Sync_Impl.Mock()
+ details = ClientCallDetails()
+ details.metadata = []
+ request = mock.Mock()
+ instance.intercept_unary_unary(continuation, details, request)
+ continuation.assert_called_once_with(details, request)
+
+ def test_unary_unary_interceptor_success(self):
+ """Test that interceptor handles successful unary-unary calls"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
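+        # the interceptor discovers the active operation via this context variable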
+ ActiveOperationMetric._active_operation_context.set(op)
+ continuation = CrossSync._Sync_Impl.Mock()
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[("a", "b")])
+ call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ result = instance.intercept_unary_unary(continuation, details, request)
+ assert result == call
+ continuation.assert_called_once_with(details, request)
+ op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+ op.end_attempt_with_status.assert_not_called()
+
+ def test_unary_unary_interceptor_failure(self):
+ """test a failed RpcError with metadata"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = RpcError("test")
+ exc.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[("a", "b")])
+ exc.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")])
+ continuation = CrossSync._Sync_Impl.Mock(side_effect=exc)
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ instance.intercept_unary_unary(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+
+ def test_unary_unary_interceptor_failure_no_metadata(self):
+        """test with RpcError without metadata attached"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = RpcError("test")
+ continuation = CrossSync._Sync_Impl.Mock(side_effect=exc)
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[("a", "b")])
+ call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ instance.intercept_unary_unary(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ op.add_response_metadata.assert_not_called()
+
+ def test_unary_unary_interceptor_failure_generic(self):
+ """test generic exception"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = ValueError("test")
+ continuation = CrossSync._Sync_Impl.Mock(side_effect=exc)
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[("a", "b")])
+ call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(ValueError) as e:
+ instance.intercept_unary_unary(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ op.add_response_metadata.assert_not_called()
+
+ def test_unary_stream_interceptor_op_not_found(self):
+ """Test that interceptor calls continuation if op is not found"""
+ instance = self._make_one()
+ continuation = CrossSync._Sync_Impl.Mock()
+ details = ClientCallDetails()
+ details.metadata = []
+ request = mock.Mock()
+ instance.intercept_unary_stream(continuation, details, request)
+ continuation.assert_called_once_with(details, request)
+
+ def test_unary_stream_interceptor_success(self):
+ """Test that interceptor handles successful unary-stream calls"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ op.start_time_ns = 0
+ op.first_response_latency = None
+ ActiveOperationMetric._active_operation_context.set(op)
+ continuation = CrossSync._Sync_Impl.Mock(
+ return_value=_make_mock_stream_call([1, 2])
+ )
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[("a", "b")])
+ call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ wrapper = instance.intercept_unary_stream(continuation, details, request)
+ results = [val for val in wrapper]
+ assert results == [1, 2]
+ continuation.assert_called_once_with(details, request)
+ assert op.first_response_latency_ns is not None
+ op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+ op.end_attempt_with_status.assert_not_called()
+
+ def test_unary_stream_interceptor_failure_mid_stream(self):
+ """Test that interceptor handles failures mid-stream"""
+ from grpc.aio import AioRpcError, Metadata
+
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ op.start_time_ns = 0
+ op.first_response_latency = None
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = AioRpcError(0, Metadata(), Metadata(("a", "b"), ("c", "d")))
+ continuation = CrossSync._Sync_Impl.Mock(
+ return_value=_make_mock_stream_call([1], exc=exc)
+ )
+ details = ClientCallDetails()
+ request = mock.Mock()
+ wrapper = instance.intercept_unary_stream(continuation, details, request)
+ with pytest.raises(AioRpcError) as e:
+ [val for val in wrapper]
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ assert op.first_response_latency_ns is not None
+ op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+
+ def test_unary_stream_interceptor_failure_start_stream(self):
+ """Test that interceptor handles failures at start of stream with RpcError with metadata"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ op.start_time_ns = 0
+ op.first_response_latency = None
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = RpcError("test")
+ exc.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[("a", "b")])
+ exc.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")])
+ continuation = CrossSync._Sync_Impl.Mock()
+ continuation.side_effect = exc
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ instance.intercept_unary_stream(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ assert op.first_response_latency_ns is not None
+ op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+
+ def test_unary_stream_interceptor_failure_start_stream_no_metadata(self):
+ """Test that interceptor handles failures at start of stream with RpcError with no metadata"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ op.start_time_ns = 0
+ op.first_response_latency = None
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = RpcError("test")
+ continuation = CrossSync._Sync_Impl.Mock()
+ continuation.side_effect = exc
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(RpcError) as e:
+ instance.intercept_unary_stream(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ assert op.first_response_latency_ns is not None
+ op.add_response_metadata.assert_not_called()
+
+ def test_unary_stream_interceptor_failure_start_stream_generic(self):
+ """Test that interceptor handles failures at start of stream with generic exception"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = OperationState.ACTIVE_ATTEMPT
+ op.start_time_ns = 0
+ op.first_response_latency = None
+ ActiveOperationMetric._active_operation_context.set(op)
+ exc = ValueError("test")
+ continuation = CrossSync._Sync_Impl.Mock()
+ continuation.side_effect = exc
+ details = ClientCallDetails()
+ request = mock.Mock()
+ with pytest.raises(ValueError) as e:
+ instance.intercept_unary_stream(continuation, details, request)
+ assert e.value == exc
+ continuation.assert_called_once_with(details, request)
+ assert op.first_response_latency_ns is not None
+ op.add_response_metadata.assert_not_called()
+
+ @pytest.mark.parametrize(
+ "initial_state", [OperationState.CREATED, OperationState.BETWEEN_ATTEMPTS]
+ )
+ def test_unary_unary_interceptor_start_operation(self, initial_state):
+ """if called with a newly created operation, it should be started"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = initial_state
+ ActiveOperationMetric._active_operation_context.set(op)
+ continuation = CrossSync._Sync_Impl.Mock()
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[])
+ call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ instance.intercept_unary_unary(continuation, details, request)
+ op.start_attempt.assert_called_once()
+
+ @pytest.mark.parametrize(
+ "initial_state", [OperationState.CREATED, OperationState.BETWEEN_ATTEMPTS]
+ )
+ def test_unary_stream_interceptor_start_operation(self, initial_state):
+ """if called with a newly created operation, it should be started"""
+ instance = self._make_one()
+ op = mock.Mock()
+ op.uuid = "test-uuid"
+ op.state = initial_state
+ ActiveOperationMetric._active_operation_context.set(op)
+ continuation = CrossSync._Sync_Impl.Mock(
+ return_value=_make_mock_stream_call([1, 2])
+ )
+ call = continuation.return_value
+ call.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[])
+ call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[])
+ details = ClientCallDetails()
+ request = mock.Mock()
+ instance.intercept_unary_stream(continuation, details, request)
+ op.start_attempt.assert_called_once()
diff --git a/tests/unit/data/_sync_autogen/test_mutations_batcher.py b/tests/unit/data/_sync_autogen/test_mutations_batcher.py
index 72db64146..92d16b349 100644
--- a/tests/unit/data/_sync_autogen/test_mutations_batcher.py
+++ b/tests/unit/data/_sync_autogen/test_mutations_batcher.py
@@ -1021,6 +1021,7 @@ def test__add_exceptions(self, limit, in_e, start_e, end_e):
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
core_exceptions.Aborted,
+ core_exceptions.Cancelled,
],
),
(
diff --git a/tests/unit/data/execute_query/_async/test_query_iterator.py b/tests/unit/data/execute_query/_async/test_query_iterator.py
index 982365556..df6321f7f 100644
--- a/tests/unit/data/execute_query/_async/test_query_iterator.py
+++ b/tests/unit/data/execute_query/_async/test_query_iterator.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import gc
from google.cloud.bigtable.data import exceptions
from google.cloud.bigtable.data.execute_query.metadata import (
_pb_metadata_to_metadata_types,
@@ -284,3 +285,123 @@ async def test_iterator_returns_error_if_metadata_requested_too_early(
with pytest.raises(exceptions.EarlyMetadataCallError):
iterator.metadata
+
+ @CrossSync.pytest
+ async def test_iterator_closes_on_full_consumption(self, proto_byte_stream):
+ """
+ Tests that the iterator's close() method is called after all results
+ have been successfully consumed.
+ """
+ client_mock = mock.Mock()
+ client_mock._register_instance = CrossSync.Mock()
+ client_mock._remove_instance_registration = CrossSync.Mock()
+ client_mock._executor = concurrent.futures.ThreadPoolExecutor()
+ mock_async_iterator = MockIterator(proto_byte_stream)
+
+ with mock.patch.object(
+ CrossSync, "retry_target_stream", return_value=mock_async_iterator
+ ):
+ iterator = self._make_one(
+ client=client_mock,
+ instance_id="test-instance",
+ app_profile_id="test_profile",
+ request_body={},
+ prepare_metadata=_pb_metadata_to_metadata_types(
+ metadata(
+ column("test1", int64_type()), column("test2", int64_type())
+ )
+ ),
+ attempt_timeout=10,
+ operation_timeout=10,
+ )
+ # Consume the entire iterator
+ results = [row async for row in iterator]
+ assert len(results) == 3
+
+ # The close method should be called automatically by the finally block
+ client_mock._remove_instance_registration.assert_called_once()
+ assert iterator.is_closed
+
+ @CrossSync.pytest
+ async def test_iterator_closes_on_early_break(self, proto_byte_stream):
+ """
+ Tests that the iterator's close() method is called if the user breaks
+ out of the iteration loop early.
+ """
+ client_mock = mock.Mock()
+ client_mock._register_instance = CrossSync.Mock()
+ client_mock._remove_instance_registration = CrossSync.Mock()
+ mock_async_iterator = MockIterator(proto_byte_stream)
+ iterator = None
+ with mock.patch.object(
+ CrossSync, "retry_target_stream", return_value=mock_async_iterator
+ ):
+ iterator = CrossSync.ExecuteQueryIterator(
+ client=client_mock,
+ instance_id="test-instance",
+ app_profile_id="test_profile",
+ request_body={},
+ prepare_metadata=_pb_metadata_to_metadata_types(
+ metadata(
+ column("test1", int64_type()), column("test2", int64_type())
+ )
+ ),
+ attempt_timeout=10,
+ operation_timeout=10,
+ )
+ async for _ in iterator:
+ break
+
+ del iterator
+ await CrossSync.sleep(1)
+        # GC outside the loop because the mock ends up holding a reference
+        # to the iterator
+ gc.collect()
+ await CrossSync.sleep(1)
+
+ # The close method should be called by the finally block when the
+ # generator is closed
+ client_mock._remove_instance_registration.assert_called_once()
+
+ @CrossSync.pytest
+ async def test_iterator_closes_on_error(self, proto_byte_stream):
+ """
+ Tests that the iterator's close() method is called if an exception
+ is raised during iteration.
+ """
+ client_mock = mock.Mock()
+ client_mock._register_instance = CrossSync.Mock()
+ client_mock._remove_instance_registration = CrossSync.Mock()
+
+ class MockErrorIterator(MockIterator):
+ @CrossSync.convert(
+ sync_name="__next__", replace_symbols={"__anext__": "__next__"}
+ )
+ async def __anext__(self):
+ if self.idx >= 1:
+ raise ValueError("Injected-test-error")
+ return await super().__anext__()
+
+ mock_async_iterator = MockErrorIterator(proto_byte_stream)
+ with mock.patch.object(
+ CrossSync, "retry_target_stream", return_value=mock_async_iterator
+ ):
+ iterator = self._make_one(
+ client=client_mock,
+ instance_id="test-instance",
+ app_profile_id="test_profile",
+ request_body={},
+ prepare_metadata=_pb_metadata_to_metadata_types(
+ metadata(
+ column("test1", int64_type()), column("test2", int64_type())
+ )
+ ),
+ attempt_timeout=10,
+ operation_timeout=10,
+ )
+ with pytest.raises(ValueError, match="Injected-test-error"):
+ async for _ in iterator:
+ pass
+
+ # The close method should be called by the finally block on error
+ client_mock._remove_instance_registration.assert_called_once()
diff --git a/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py b/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py
index d4f3ec26f..3915693cd 100644
--- a/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py
+++ b/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py
@@ -15,6 +15,7 @@
# This file is automatically generated by CrossSync. Do not edit manually.
+import gc
from google.cloud.bigtable.data import exceptions
from google.cloud.bigtable.data.execute_query.metadata import (
_pb_metadata_to_metadata_types,
@@ -248,3 +249,105 @@ def test_iterator_returns_error_if_metadata_requested_too_early(
)
with pytest.raises(exceptions.EarlyMetadataCallError):
iterator.metadata
+
+ def test_iterator_closes_on_full_consumption(self, proto_byte_stream):
+ """Tests that the iterator's close() method is called after all results
+ have been successfully consumed."""
+ client_mock = mock.Mock()
+ client_mock._register_instance = CrossSync._Sync_Impl.Mock()
+ client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock()
+ client_mock._executor = concurrent.futures.ThreadPoolExecutor()
+ mock_async_iterator = MockIterator(proto_byte_stream)
+ with mock.patch.object(
+ CrossSync._Sync_Impl,
+ "retry_target_stream",
+ return_value=mock_async_iterator,
+ ):
+ iterator = self._make_one(
+ client=client_mock,
+ instance_id="test-instance",
+ app_profile_id="test_profile",
+ request_body={},
+ prepare_metadata=_pb_metadata_to_metadata_types(
+ metadata(
+ column("test1", int64_type()), column("test2", int64_type())
+ )
+ ),
+ attempt_timeout=10,
+ operation_timeout=10,
+ )
+ results = [row for row in iterator]
+ assert len(results) == 3
+ client_mock._remove_instance_registration.assert_called_once()
+ assert iterator.is_closed
+
+ def test_iterator_closes_on_early_break(self, proto_byte_stream):
+ """Tests that the iterator's close() method is called if the user breaks
+ out of the iteration loop early."""
+ client_mock = mock.Mock()
+ client_mock._register_instance = CrossSync._Sync_Impl.Mock()
+ client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock()
+ mock_async_iterator = MockIterator(proto_byte_stream)
+ iterator = None
+ with mock.patch.object(
+ CrossSync._Sync_Impl,
+ "retry_target_stream",
+ return_value=mock_async_iterator,
+ ):
+ iterator = CrossSync._Sync_Impl.ExecuteQueryIterator(
+ client=client_mock,
+ instance_id="test-instance",
+ app_profile_id="test_profile",
+ request_body={},
+ prepare_metadata=_pb_metadata_to_metadata_types(
+ metadata(
+ column("test1", int64_type()), column("test2", int64_type())
+ )
+ ),
+ attempt_timeout=10,
+ operation_timeout=10,
+ )
+ for _ in iterator:
+ break
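+        # drop the last reference and force a GC pass so the generator's
+        # cleanup runs and the iterator is closed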
+ del iterator
+ CrossSync._Sync_Impl.sleep(1)
+ gc.collect()
+ CrossSync._Sync_Impl.sleep(1)
+ client_mock._remove_instance_registration.assert_called_once()
+
+ def test_iterator_closes_on_error(self, proto_byte_stream):
+ """Tests that the iterator's close() method is called if an exception
+ is raised during iteration."""
+ client_mock = mock.Mock()
+ client_mock._register_instance = CrossSync._Sync_Impl.Mock()
+ client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock()
+
+ class MockErrorIterator(MockIterator):
+ def __next__(self):
+ if self.idx >= 1:
+ raise ValueError("Injected-test-error")
+ return super().__next__()
+
+ mock_async_iterator = MockErrorIterator(proto_byte_stream)
+ with mock.patch.object(
+ CrossSync._Sync_Impl,
+ "retry_target_stream",
+ return_value=mock_async_iterator,
+ ):
+ iterator = self._make_one(
+ client=client_mock,
+ instance_id="test-instance",
+ app_profile_id="test_profile",
+ request_body={},
+ prepare_metadata=_pb_metadata_to_metadata_types(
+ metadata(
+ column("test1", int64_type()), column("test2", int64_type())
+ )
+ ),
+ attempt_timeout=10,
+ operation_timeout=10,
+ )
+ with pytest.raises(ValueError, match="Injected-test-error"):
+ for _ in iterator:
+ pass
+ client_mock._remove_instance_registration.assert_called_once()
diff --git a/tests/unit/data/execute_query/sql_helpers.py b/tests/unit/data/execute_query/sql_helpers.py
index 5d5569dba..119bb2d50 100644
--- a/tests/unit/data/execute_query/sql_helpers.py
+++ b/tests/unit/data/execute_query/sql_helpers.py
@@ -204,6 +204,18 @@ def date_type() -> Type:
return t
+def proto_type() -> Type:
+ t = Type()
+ t.proto_type = {}
+ return t
+
+
+def enum_type() -> Type:
+ t = Type()
+ t.enum_type = {}
+ return t
+
+
def array_type(elem_type: Type) -> Type:
t = Type()
arr_type = Type.Array()
diff --git a/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py b/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py
index ee0322272..0a1be1423 100644
--- a/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py
+++ b/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py
@@ -25,6 +25,7 @@
from google.cloud.bigtable.data.execute_query.metadata import SqlType
from google.cloud.bigtable.data.execute_query.values import Struct
from google.protobuf import timestamp_pb2
+from samples.testdata import singer_pb2
timestamp = int(
datetime.datetime(2024, 5, 12, 17, 44, 12, tzinfo=datetime.timezone.utc).timestamp()
@@ -267,6 +268,18 @@ def test_execute_query_parameters_not_supported_types():
{"test1": SqlType.Struct([("field1", SqlType.Int64())])},
)
+ with pytest.raises(NotImplementedError, match="not supported"):
+ _format_execute_query_params(
+ {"test1": singer_pb2.Singer()},
+ {"test1": SqlType.Proto()},
+ )
+
+ with pytest.raises(NotImplementedError, match="not supported"):
+ _format_execute_query_params(
+ {"test1": singer_pb2.Genre.ROCK},
+ {"test1": SqlType.Enum()},
+ )
+
def test_instance_execute_query_parameters_not_match():
with pytest.raises(ValueError, match="test2"):
diff --git a/tests/unit/data/execute_query/test_query_result_parsing_utils.py b/tests/unit/data/execute_query/test_query_result_parsing_utils.py
index 627570c37..ea03dfe9a 100644
--- a/tests/unit/data/execute_query/test_query_result_parsing_utils.py
+++ b/tests/unit/data/execute_query/test_query_result_parsing_utils.py
@@ -28,7 +28,8 @@
import datetime
-from tests.unit.data.execute_query.sql_helpers import int64_type
+from tests.unit.data.execute_query.sql_helpers import int64_type, proto_type, enum_type
+from samples.testdata import singer_pb2
TYPE_BYTES = {"bytes_type": {}}
TYPE_TIMESTAMP = {"timestamp_type": {}}
@@ -82,9 +83,61 @@ def test_basic_types(
assert type(metadata_type) is expected_metadata_type
value = PBValue(value_dict)
assert (
- _parse_pb_value_to_python_value(value._pb, metadata_type) == expected_value
+ _parse_pb_value_to_python_value(value._pb, metadata_type, "my_field")
+ == expected_value
)
+ def test__proto(self):
+ _type = PBType({"proto_type": {}})
+ metadata_type = _pb_type_to_metadata_type(_type)
+ assert type(metadata_type) is SqlType.Proto
+
+ singer = singer_pb2.Singer(name="John")
+ value = PBValue({"bytes_value": singer.SerializeToString()})
+
+ # without proto definition
+ result = _parse_pb_value_to_python_value(
+ value._pb, metadata_type, "proto_field"
+ )
+ assert result == singer.SerializeToString()
+ result = _parse_pb_value_to_python_value(
+ value._pb,
+ metadata_type,
+ None,
+ {"proto_field": singer_pb2.Singer()},
+ )
+ assert result == singer.SerializeToString()
+
+ # with proto definition
+ result = _parse_pb_value_to_python_value(
+ value._pb,
+ metadata_type,
+ "proto_field",
+ {"proto_field": singer_pb2.Singer()},
+ )
+ assert result == singer
+
+ def test__enum(self):
+ _type = PBType({"enum_type": {}})
+ metadata_type = _pb_type_to_metadata_type(_type)
+ assert type(metadata_type) is SqlType.Enum
+
+ value = PBValue({"int_value": 1})
+
+ # without enum definition
+ result = _parse_pb_value_to_python_value(value._pb, metadata_type, "enum_field")
+ assert result == 1
+ result = _parse_pb_value_to_python_value(
+ value._pb, metadata_type, None, {"enum_field": singer_pb2.Genre}
+ )
+ assert result == 1
+
+ # with enum definition
+ result = _parse_pb_value_to_python_value(
+ value._pb, metadata_type, "enum_field", {"enum_field": singer_pb2.Genre}
+ )
+ assert result == "JAZZ"
+
# Larger test cases were extracted for readability
def test__array(self):
_type = PBType({"array_type": {"element_type": int64_type()}})
@@ -103,7 +156,79 @@ def test__array(self):
}
}
)
- assert _parse_pb_value_to_python_value(value._pb, metadata_type) == [1, 2, 3, 4]
+ assert _parse_pb_value_to_python_value(
+ value._pb, metadata_type, "array_field"
+ ) == [1, 2, 3, 4]
+
+ def test__array_of_protos(self):
+ _type = PBType({"array_type": {"element_type": proto_type()}})
+ metadata_type = _pb_type_to_metadata_type(_type)
+ assert type(metadata_type) is SqlType.Array
+ assert type(metadata_type.element_type) is SqlType.Proto
+
+ singer1 = singer_pb2.Singer(name="John")
+ singer2 = singer_pb2.Singer(name="Taylor")
+ value = PBValue(
+ {
+ "array_value": {
+ "values": [
+ {"bytes_value": singer1.SerializeToString()},
+ {"bytes_value": singer2.SerializeToString()},
+ ]
+ }
+ }
+ )
+
+ # without proto definition
+ result = _parse_pb_value_to_python_value(
+ value._pb, metadata_type, "array_field"
+ )
+ assert result == [singer1.SerializeToString(), singer2.SerializeToString()]
+ result = _parse_pb_value_to_python_value(
+ value._pb, metadata_type, None, {"array_field": singer_pb2.Singer()}
+ )
+ assert result == [singer1.SerializeToString(), singer2.SerializeToString()]
+
+ # with proto definition
+ result = _parse_pb_value_to_python_value(
+ value._pb,
+ metadata_type,
+ "array_field",
+ {"array_field": singer_pb2.Singer()},
+ )
+ assert result == [singer1, singer2]
+
+ def test__array_of_enums(self):
+ _type = PBType({"array_type": {"element_type": enum_type()}})
+ metadata_type = _pb_type_to_metadata_type(_type)
+ assert type(metadata_type) is SqlType.Array
+ assert type(metadata_type.element_type) is SqlType.Enum
+
+ value = PBValue(
+ {
+ "array_value": {
+ "values": [
+ {"int_value": 0}, # POP
+ {"int_value": 1}, # JAZZ
+ ]
+ }
+ }
+ )
+
+ # without enum definition
+ result = _parse_pb_value_to_python_value(
+ value._pb, metadata_type, "array_field"
+ )
+ assert result == [0, 1]
+
+ # with enum definition
+ result = _parse_pb_value_to_python_value(
+ value._pb,
+ metadata_type,
+ "array_field",
+ {"array_field": singer_pb2.Genre},
+ )
+ assert result == ["POP", "JAZZ"]
def test__struct(self):
_type = PBType(
@@ -164,7 +289,9 @@ def test__struct(self):
with pytest.raises(KeyError, match="Ambigious field name"):
metadata_type["field3"]
- result = _parse_pb_value_to_python_value(value._pb, metadata_type)
+ result = _parse_pb_value_to_python_value(
+ value._pb, metadata_type, "struct_field"
+ )
assert isinstance(result, Struct)
assert result["field1"] == result[0] == 1
assert result[1] == "test2"
@@ -177,6 +304,87 @@ def test__struct(self):
assert result[2] == [2, 3, 4, 5]
assert result[3] == "test4"
+ def test__struct_with_proto_and_enum(self):
+ singer1 = singer_pb2.Singer(name="John")
+ singer2 = singer_pb2.Singer(name="Taylor")
+ _type = PBType(
+ {
+ "struct_type": {
+ "fields": [
+ {
+ "field_name": "field1",
+ "type_": proto_type(),
+ },
+ {
+ "field_name": None,
+ "type_": proto_type(),
+ },
+ {
+ "field_name": "field2",
+ "type_": enum_type(),
+ },
+ {
+ "field_name": None,
+ "type_": enum_type(),
+ },
+ ]
+ }
+ }
+ )
+ value = PBValue(
+ {
+ "array_value": {
+ "values": [
+ {"bytes_value": singer1.SerializeToString()},
+ {"bytes_value": singer2.SerializeToString()},
+ {"int_value": 0},
+ {"int_value": 1},
+ ]
+ }
+ }
+ )
+
+ metadata_type = _pb_type_to_metadata_type(_type)
+ assert type(metadata_type) is SqlType.Struct
+ assert type(metadata_type["field1"]) is SqlType.Proto
+ assert type(metadata_type["field2"]) is SqlType.Enum
+ assert type(metadata_type[0]) is SqlType.Proto
+ assert type(metadata_type[1]) is SqlType.Proto
+ assert type(metadata_type[2]) is SqlType.Enum
+ assert type(metadata_type[3]) is SqlType.Enum
+
+ # without proto definition
+ result = _parse_pb_value_to_python_value(
+ value._pb, metadata_type, "struct_field"
+ )
+ assert isinstance(result, Struct)
+ assert result["field1"] == singer1.SerializeToString()
+ assert result["field2"] == 0
+ assert result[0] == singer1.SerializeToString()
+ assert result[1] == singer2.SerializeToString()
+ assert result[2] == 0
+ assert result[3] == 1
+
+ # with proto definition
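+        # struct sub-fields are addressed as "<column_name>.<field_name>"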
+ result = _parse_pb_value_to_python_value(
+ value._pb,
+ metadata_type,
+ "struct_field",
+ {
+ "struct_field.field1": singer_pb2.Singer(),
+ "struct_field.field2": singer_pb2.Genre,
+ },
+ )
+ assert isinstance(result, Struct)
+ assert result["field1"] == singer1
+ assert result["field2"] == "POP"
+ assert result[0] == singer1
+ # unnamed proto fields won't get parsed
+ assert result[1] == singer2.SerializeToString()
+ assert result[2] == "POP"
+ # unnamed enum fields won't get parsed
+ assert result[3] == 1
+
def test__array_of_structs(self):
_type = PBType(
{
@@ -254,7 +462,9 @@ def test__array_of_structs(self):
assert type(metadata_type.element_type[1]) is SqlType.String
assert type(metadata_type.element_type["field3"]) is SqlType.Bool
- result = _parse_pb_value_to_python_value(value._pb, metadata_type)
+ result = _parse_pb_value_to_python_value(
+ value._pb, metadata_type, "array_field"
+ )
assert isinstance(result, list)
assert len(result) == 4
@@ -278,6 +488,106 @@ def test__array_of_structs(self):
assert result[3][1] == "test4"
assert not result[3]["field3"]
+ def test__array_of_structs_with_proto_and_enum(self):
+ singer1 = singer_pb2.Singer(name="John")
+ singer2 = singer_pb2.Singer(name="Taylor")
+ _type = PBType(
+ {
+ "array_type": {
+ "element_type": {
+ "struct_type": {
+ "fields": [
+ {
+ "field_name": "proto_field",
+ "type_": proto_type(),
+ },
+ {
+ "field_name": "enum_field",
+ "type_": enum_type(),
+ },
+ {
+ "field_name": None,
+ "type_": proto_type(),
+ },
+ ]
+ }
+ }
+ }
+ }
+ )
+ value = PBValue(
+ {
+ "array_value": {
+ "values": [
+ {
+ "array_value": {
+ "values": [
+ {"bytes_value": singer1.SerializeToString()},
+ {"int_value": 0}, # POP
+ {"bytes_value": singer1.SerializeToString()},
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [
+ {"bytes_value": singer2.SerializeToString()},
+ {"int_value": 1}, # JAZZ
+ {"bytes_value": singer2.SerializeToString()},
+ ]
+ }
+ },
+ ]
+ }
+ }
+ )
+
+ metadata_type = _pb_type_to_metadata_type(_type)
+ assert type(metadata_type) is SqlType.Array
+ assert type(metadata_type.element_type) is SqlType.Struct
+ assert type(metadata_type.element_type["proto_field"]) is SqlType.Proto
+ assert type(metadata_type.element_type["enum_field"]) is SqlType.Enum
+ assert type(metadata_type.element_type[2]) is SqlType.Proto
+
+ # without proto definition
+ result = _parse_pb_value_to_python_value(
+ value._pb, metadata_type, "array_field"
+ )
+ assert isinstance(result, list)
+ assert len(result) == 2
+ assert isinstance(result[0], Struct)
+ assert result[0]["proto_field"] == singer1.SerializeToString()
+ assert result[0]["enum_field"] == 0
+ assert result[0][2] == singer1.SerializeToString()
+ assert isinstance(result[1], Struct)
+ assert result[1]["proto_field"] == singer2.SerializeToString()
+ assert result[1]["enum_field"] == 1
+ assert result[1][2] == singer2.SerializeToString()
+
+ # with proto definition
+ result = _parse_pb_value_to_python_value(
+ value._pb,
+ metadata_type,
+ "array_field",
+ {
+ "array_field.proto_field": singer_pb2.Singer(),
+ "array_field.enum_field": singer_pb2.Genre,
+ "array_field": singer_pb2.Singer(), # unused
+ },
+ )
+ assert isinstance(result, list)
+ assert len(result) == 2
+ assert isinstance(result[0], Struct)
+ assert result[0]["proto_field"] == singer1
+ assert result[0]["enum_field"] == "POP"
+ # unnamed proto fields won't get parsed
+ assert result[0][2] == singer1.SerializeToString()
+ assert isinstance(result[1], Struct)
+ assert result[1]["proto_field"] == singer2
+ assert result[1]["enum_field"] == "JAZZ"
+ # unnamed proto fields won't get parsed
+ assert result[1][2] == singer2.SerializeToString()
+
def test__map(self):
_type = PBType(
{
@@ -333,7 +643,7 @@ def test__map(self):
assert type(metadata_type.key_type) is SqlType.Int64
assert type(metadata_type.value_type) is SqlType.String
- result = _parse_pb_value_to_python_value(value._pb, metadata_type)
+ result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field")
assert isinstance(result, dict)
assert len(result) == 4
@@ -387,13 +697,135 @@ def test__map_repeated_values(self):
)
metadata_type = _pb_type_to_metadata_type(_type)
- result = _parse_pb_value_to_python_value(value._pb, metadata_type)
+ result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field")
assert len(result) == 1
assert result == {
1: "test3",
}
+ def test__map_with_protos(self):
+ singer1 = singer_pb2.Singer(name="John")
+ singer2 = singer_pb2.Singer(name="Taylor")
+ _type = PBType(
+ {
+ "map_type": {
+ "key_type": int64_type(),
+ "value_type": proto_type(),
+ }
+ }
+ )
+ value = PBValue(
+ {
+ "array_value": {
+ "values": [
+ {
+ "array_value": {
+ "values": [
+ {"int_value": 1},
+ {"bytes_value": singer1.SerializeToString()},
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [
+ {"int_value": 2},
+ {"bytes_value": singer2.SerializeToString()},
+ ]
+ }
+ },
+ ]
+ }
+ }
+ )
+
+ metadata_type = _pb_type_to_metadata_type(_type)
+ assert type(metadata_type) is SqlType.Map
+ assert type(metadata_type.key_type) is SqlType.Int64
+ assert type(metadata_type.value_type) is SqlType.Proto
+
+ # without proto definition
+ result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field")
+ assert isinstance(result, dict)
+ assert len(result) == 2
+ assert result[1] == singer1.SerializeToString()
+ assert result[2] == singer2.SerializeToString()
+
+ # with proto definition
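+        # map values are addressed under the "<column_name>.value" key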
+ result = _parse_pb_value_to_python_value(
+ value._pb,
+ metadata_type,
+ "map_field",
+ {
+ "map_field.value": singer_pb2.Singer(),
+ },
+ )
+ assert isinstance(result, dict)
+ assert len(result) == 2
+ assert result[1] == singer1
+ assert result[2] == singer2
+
+ def test__map_with_enums(self):
+ _type = PBType(
+ {
+ "map_type": {
+ "key_type": int64_type(),
+ "value_type": enum_type(),
+ }
+ }
+ )
+ value = PBValue(
+ {
+ "array_value": {
+ "values": [
+ {
+ "array_value": {
+ "values": [
+ {"int_value": 1},
+ {"int_value": 0}, # POP
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [
+ {"int_value": 2},
+ {"int_value": 1}, # JAZZ
+ ]
+ }
+ },
+ ]
+ }
+ }
+ )
+
+ metadata_type = _pb_type_to_metadata_type(_type)
+ assert type(metadata_type) is SqlType.Map
+ assert type(metadata_type.key_type) is SqlType.Int64
+ assert type(metadata_type.value_type) is SqlType.Enum
+
+ # without enum definition
+ result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field")
+ assert isinstance(result, dict)
+ assert len(result) == 2
+ assert result[1] == 0
+ assert result[2] == 1
+
+ # with enum definition
+ result = _parse_pb_value_to_python_value(
+ value._pb,
+ metadata_type,
+ "map_field",
+ {
+ "map_field.value": singer_pb2.Genre,
+ },
+ )
+ assert isinstance(result, dict)
+ assert len(result) == 2
+ assert result[1] == "POP"
+ assert result[2] == "JAZZ"
+
def test__map_of_maps_of_structs(self):
_type = PBType(
{
@@ -539,7 +971,7 @@ def test__map_of_maps_of_structs(self):
assert type(metadata_type.value_type.value_type) is SqlType.Struct
assert type(metadata_type.value_type.value_type["field1"]) is SqlType.Int64
assert type(metadata_type.value_type.value_type["field2"]) is SqlType.String
- result = _parse_pb_value_to_python_value(value._pb, metadata_type)
+ result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field")
assert result[1]["1_1"]["field1"] == 1
assert result[1]["1_1"]["field2"] == "test1"
@@ -553,23 +985,31 @@ def test__map_of_maps_of_structs(self):
assert result[2]["2_2"]["field1"] == 4
assert result[2]["2_2"]["field2"] == "test4"
- def test__map_of_lists_of_structs(self):
+ def test__map_of_maps_of_structs_with_proto_and_enum(self):
+ singer1 = singer_pb2.Singer(name="John")
+ singer2 = singer_pb2.Singer(name="Taylor")
+
_type = PBType(
{
"map_type": {
- "key_type": TYPE_BYTES,
+ "key_type": int64_type(),
"value_type": {
- "array_type": {
- "element_type": {
+ "map_type": {
+ "key_type": {"string_type": {}},
+ "value_type": {
"struct_type": {
"fields": [
{
- "field_name": "timestamp",
- "type_": TYPE_TIMESTAMP,
+ "field_name": "int_field",
+ "type_": int64_type(),
},
{
- "field_name": "value",
- "type_": TYPE_BYTES,
+ "field_name": "singer",
+ "type_": proto_type(),
+ },
+ {
+ "field_name": "genre",
+ "type_": enum_type(),
},
]
}
@@ -582,20 +1022,225 @@ def test__map_of_lists_of_structs(self):
value = PBValue(
{
"array_value": {
- "values": [ # list of (byte, list) tuples
+ "values": [ # list of (int, map) tuples
{
"array_value": {
- "values": [ # (byte, list) tuple
- {"bytes_value": b"key1"},
+ "values": [ # (int, map) tuples
+ {"int_value": 1},
{
"array_value": {
- "values": [ # list of structs
+ "values": [ # list of (str, struct) tuples
{
"array_value": {
- "values": [ # (timestamp, bytes) tuple
+ "values": [ # (str, struct) tuples
+ {"string_value": "1_1"},
{
- "timestamp_value": {
- "seconds": 1111111111
+ "array_value": {
+ "values": [
+ {
+ "int_value": 12
+ },
+ {
+ "bytes_value": singer1.SerializeToString()
+ },
+ {
+ "int_value": 0
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [ # (str, struct) tuples
+ {"string_value": "1_2"},
+ {
+ "array_value": {
+ "values": [
+ {
+ "int_value": 34
+ },
+ {
+ "bytes_value": singer2.SerializeToString()
+ },
+ {
+ "int_value": 1
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [ # (int, map) tuples
+ {"int_value": 2},
+ {
+ "array_value": {
+ "values": [ # list of (str, struct) tuples
+ {
+ "array_value": {
+ "values": [ # (str, struct) tuples
+ {"string_value": "2_1"},
+ {
+ "array_value": {
+ "values": [
+ {
+ "int_value": 56
+ },
+ {
+ "bytes_value": singer1.SerializeToString()
+ },
+ {
+ "int_value": 2
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [ # (str, struct) tuples
+ {"string_value": "2_2"},
+ {
+ "array_value": {
+ "values": [
+ {
+ "int_value": 78
+ },
+ {
+ "bytes_value": singer2.SerializeToString()
+ },
+ {
+ "int_value": 3
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ ]
+ }
+ }
+ )
+
+ metadata_type = _pb_type_to_metadata_type(_type)
+ assert type(metadata_type) is SqlType.Map
+ assert type(metadata_type.key_type) is SqlType.Int64
+ assert type(metadata_type.value_type) is SqlType.Map
+ assert type(metadata_type.value_type.key_type) is SqlType.String
+ assert type(metadata_type.value_type.value_type) is SqlType.Struct
+ assert type(metadata_type.value_type.value_type["int_field"]) is SqlType.Int64
+ assert type(metadata_type.value_type.value_type["singer"]) is SqlType.Proto
+ assert type(metadata_type.value_type.value_type["genre"]) is SqlType.Enum
+
+ # without proto definition
+ result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field")
+
+ assert result[1]["1_1"]["int_field"] == 12
+ assert result[1]["1_1"]["singer"] == singer1.SerializeToString()
+ assert result[1]["1_1"]["genre"] == 0
+
+ assert result[1]["1_2"]["int_field"] == 34
+ assert result[1]["1_2"]["singer"] == singer2.SerializeToString()
+ assert result[1]["1_2"]["genre"] == 1
+
+ assert result[2]["2_1"]["int_field"] == 56
+ assert result[2]["2_1"]["singer"] == singer1.SerializeToString()
+ assert result[2]["2_1"]["genre"] == 2
+
+ assert result[2]["2_2"]["int_field"] == 78
+ assert result[2]["2_2"]["singer"] == singer2.SerializeToString()
+ assert result[2]["2_2"]["genre"] == 3
+
+ # with proto definition
+ result = _parse_pb_value_to_python_value(
+ value._pb,
+ metadata_type,
+ "map_field",
+ {
+ "map_field.value.value.singer": singer_pb2.Singer(),
+ "map_field.value.value.genre": singer_pb2.Genre,
+ },
+ )
+
+ assert result[1]["1_1"]["int_field"] == 12
+ assert result[1]["1_1"]["singer"] == singer1
+ assert result[1]["1_1"]["genre"] == "POP"
+
+ assert result[1]["1_2"]["int_field"] == 34
+ assert result[1]["1_2"]["singer"] == singer2
+ assert result[1]["1_2"]["genre"] == "JAZZ"
+
+ assert result[2]["2_1"]["int_field"] == 56
+ assert result[2]["2_1"]["singer"] == singer1
+ assert result[2]["2_1"]["genre"] == "FOLK"
+
+ assert result[2]["2_2"]["int_field"] == 78
+ assert result[2]["2_2"]["singer"] == singer2
+ assert result[2]["2_2"]["genre"] == "ROCK"
+
+ def test__map_of_lists_of_structs(self):
+ _type = PBType(
+ {
+ "map_type": {
+ "key_type": TYPE_BYTES,
+ "value_type": {
+ "array_type": {
+ "element_type": {
+ "struct_type": {
+ "fields": [
+ {
+ "field_name": "timestamp",
+ "type_": TYPE_TIMESTAMP,
+ },
+ {
+ "field_name": "value",
+ "type_": TYPE_BYTES,
+ },
+ ]
+ }
+ },
+ }
+ },
+ }
+ }
+ )
+ value = PBValue(
+ {
+ "array_value": {
+ "values": [ # list of (byte, list) tuples
+ {
+ "array_value": {
+ "values": [ # (byte, list) tuple
+ {"bytes_value": b"key1"},
+ {
+ "array_value": {
+ "values": [ # list of structs
+ {
+ "array_value": {
+ "values": [ # (timestamp, bytes) tuple
+ {
+ "timestamp_value": {
+ "seconds": 1111111111
}
},
{
@@ -679,7 +1324,7 @@ def test__map_of_lists_of_structs(self):
is SqlType.Timestamp
)
assert type(metadata_type.value_type.element_type["value"]) is SqlType.Bytes
- result = _parse_pb_value_to_python_value(value._pb, metadata_type)
+ result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field")
timestamp1 = DatetimeWithNanoseconds(
2005, 3, 18, 1, 58, 31, tzinfo=datetime.timezone.utc
@@ -703,6 +1348,341 @@ def test__map_of_lists_of_structs(self):
assert result[b"key2"][1]["timestamp"] == timestamp4
assert result[b"key2"][1]["value"] == b"key2-value2"
+ def test__map_of_lists_of_structs_with_protos(self):
+ singer1 = singer_pb2.Singer(name="John")
+ singer2 = singer_pb2.Singer(name="Taylor")
+ singer3 = singer_pb2.Singer(name="Jay")
+ singer4 = singer_pb2.Singer(name="Eric")
+
+ _type = PBType(
+ {
+ "map_type": {
+ "key_type": TYPE_BYTES,
+ "value_type": {
+ "array_type": {
+ "element_type": {
+ "struct_type": {
+ "fields": [
+ {
+ "field_name": "timestamp",
+ "type_": TYPE_TIMESTAMP,
+ },
+ {
+ "field_name": "value",
+ "type_": proto_type(),
+ },
+ ]
+ }
+ },
+ }
+ },
+ }
+ }
+ )
+ value = PBValue(
+ {
+ "array_value": {
+ "values": [ # list of (byte, list) tuples
+ {
+ "array_value": {
+ "values": [ # (byte, list) tuple
+ {"bytes_value": b"key1"},
+ {
+ "array_value": {
+ "values": [ # list of structs
+ {
+ "array_value": {
+ "values": [ # (timestamp, bytes) tuple
+ {
+ "timestamp_value": {
+ "seconds": 1111111111
+ }
+ },
+ {
+ "bytes_value": singer1.SerializeToString()
+ },
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [ # (timestamp, bytes) tuple
+ {
+ "timestamp_value": {
+ "seconds": 2222222222
+ }
+ },
+ {
+ "bytes_value": singer2.SerializeToString()
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [ # (byte, list) tuple
+ {"bytes_value": b"key2"},
+ {
+ "array_value": {
+ "values": [ # list of structs
+ {
+ "array_value": {
+ "values": [ # (timestamp, bytes) tuple
+ {
+ "timestamp_value": {
+ "seconds": 3333333333
+ }
+ },
+ {
+ "bytes_value": singer3.SerializeToString()
+ },
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [ # (timestamp, bytes) tuple
+ {
+ "timestamp_value": {
+ "seconds": 4444444444
+ }
+ },
+ {
+ "bytes_value": singer4.SerializeToString()
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ ]
+ }
+ }
+ )
+ metadata_type = _pb_type_to_metadata_type(_type)
+ assert type(metadata_type) is SqlType.Map
+ assert type(metadata_type.key_type) is SqlType.Bytes
+ assert type(metadata_type.value_type) is SqlType.Array
+ assert type(metadata_type.value_type.element_type) is SqlType.Struct
+ assert (
+ type(metadata_type.value_type.element_type["timestamp"])
+ is SqlType.Timestamp
+ )
+ assert type(metadata_type.value_type.element_type["value"]) is SqlType.Proto
+
+ timestamp1 = DatetimeWithNanoseconds(
+ 2005, 3, 18, 1, 58, 31, tzinfo=datetime.timezone.utc
+ )
+ timestamp2 = DatetimeWithNanoseconds(
+ 2040, 6, 2, 3, 57, 2, tzinfo=datetime.timezone.utc
+ )
+ timestamp3 = DatetimeWithNanoseconds(
+ 2075, 8, 18, 5, 55, 33, tzinfo=datetime.timezone.utc
+ )
+ timestamp4 = DatetimeWithNanoseconds(
+ 2110, 11, 3, 7, 54, 4, tzinfo=datetime.timezone.utc
+ )
+
+ # without proto definition
+ result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field")
+ assert result[b"key1"][0]["timestamp"] == timestamp1
+ assert result[b"key1"][0]["value"] == singer1.SerializeToString()
+ assert result[b"key1"][1]["timestamp"] == timestamp2
+ assert result[b"key1"][1]["value"] == singer2.SerializeToString()
+ assert result[b"key2"][0]["timestamp"] == timestamp3
+ assert result[b"key2"][0]["value"] == singer3.SerializeToString()
+ assert result[b"key2"][1]["timestamp"] == timestamp4
+ assert result[b"key2"][1]["value"] == singer4.SerializeToString()
+
+ # with proto definition
+ result = _parse_pb_value_to_python_value(
+ value._pb,
+ metadata_type,
+ "map_field",
+ {
+ "map_field.value.value": singer_pb2.Singer(),
+ },
+ )
+ assert result[b"key1"][0]["timestamp"] == timestamp1
+ assert result[b"key1"][0]["value"] == singer1
+ assert result[b"key1"][1]["timestamp"] == timestamp2
+ assert result[b"key1"][1]["value"] == singer2
+ assert result[b"key2"][0]["timestamp"] == timestamp3
+ assert result[b"key2"][0]["value"] == singer3
+ assert result[b"key2"][1]["timestamp"] == timestamp4
+ assert result[b"key2"][1]["value"] == singer4
+
+ def test__map_of_lists_of_structs_with_enums(self):
+ _type = PBType(
+ {
+ "map_type": {
+ "key_type": TYPE_BYTES,
+ "value_type": {
+ "array_type": {
+ "element_type": {
+ "struct_type": {
+ "fields": [
+ {
+ "field_name": "timestamp",
+ "type_": TYPE_TIMESTAMP,
+ },
+ {
+ "field_name": "value",
+ "type_": enum_type(),
+ },
+ ]
+ }
+ },
+ }
+ },
+ }
+ }
+ )
+ value = PBValue(
+ {
+ "array_value": {
+ "values": [ # list of (byte, list) tuples
+ {
+ "array_value": {
+ "values": [ # (byte, list) tuple
+ {"bytes_value": b"key1"},
+ {
+ "array_value": {
+ "values": [ # list of structs
+ {
+ "array_value": {
+ "values": [ # (timestamp, bytes) tuple
+ {
+ "timestamp_value": {
+ "seconds": 1111111111
+ }
+ },
+ {"int_value": 0},
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [ # (timestamp, bytes) tuple
+ {
+ "timestamp_value": {
+ "seconds": 2222222222
+ }
+ },
+ {"int_value": 1},
+ ]
+ }
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [ # (byte, list) tuple
+ {"bytes_value": b"key2"},
+ {
+ "array_value": {
+ "values": [ # list of structs
+ {
+ "array_value": {
+ "values": [ # (timestamp, bytes) tuple
+ {
+ "timestamp_value": {
+ "seconds": 3333333333
+ }
+ },
+ {"int_value": 2},
+ ]
+ }
+ },
+ {
+ "array_value": {
+ "values": [ # (timestamp, bytes) tuple
+ {
+ "timestamp_value": {
+ "seconds": 4444444444
+ }
+ },
+ {"int_value": 3},
+ ]
+ }
+ },
+ ]
+ }
+ },
+ ]
+ }
+ },
+ ]
+ }
+ }
+ )
+ metadata_type = _pb_type_to_metadata_type(_type)
+ assert type(metadata_type) is SqlType.Map
+ assert type(metadata_type.key_type) is SqlType.Bytes
+ assert type(metadata_type.value_type) is SqlType.Array
+ assert type(metadata_type.value_type.element_type) is SqlType.Struct
+ assert (
+ type(metadata_type.value_type.element_type["timestamp"])
+ is SqlType.Timestamp
+ )
+ assert type(metadata_type.value_type.element_type["value"]) is SqlType.Enum
+
+ timestamp1 = DatetimeWithNanoseconds(
+ 2005, 3, 18, 1, 58, 31, tzinfo=datetime.timezone.utc
+ )
+ timestamp2 = DatetimeWithNanoseconds(
+ 2040, 6, 2, 3, 57, 2, tzinfo=datetime.timezone.utc
+ )
+ timestamp3 = DatetimeWithNanoseconds(
+ 2075, 8, 18, 5, 55, 33, tzinfo=datetime.timezone.utc
+ )
+ timestamp4 = DatetimeWithNanoseconds(
+ 2110, 11, 3, 7, 54, 4, tzinfo=datetime.timezone.utc
+ )
+
+ # without enum definition
+ result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field")
+ assert result[b"key1"][0]["timestamp"] == timestamp1
+ assert result[b"key1"][0]["value"] == 0
+ assert result[b"key1"][1]["timestamp"] == timestamp2
+ assert result[b"key1"][1]["value"] == 1
+ assert result[b"key2"][0]["timestamp"] == timestamp3
+ assert result[b"key2"][0]["value"] == 2
+ assert result[b"key2"][1]["timestamp"] == timestamp4
+ assert result[b"key2"][1]["value"] == 3
+
+ # with enum definition
+ result = _parse_pb_value_to_python_value(
+ value._pb,
+ metadata_type,
+ "map_field",
+ {
+ "map_field.value.value": singer_pb2.Genre,
+ },
+ )
+ assert result[b"key1"][0]["timestamp"] == timestamp1
+ assert result[b"key1"][0]["value"] == "POP"
+ assert result[b"key1"][1]["timestamp"] == timestamp2
+ assert result[b"key1"][1]["value"] == "JAZZ"
+ assert result[b"key2"][0]["timestamp"] == timestamp3
+ assert result[b"key2"][0]["value"] == "FOLK"
+ assert result[b"key2"][1]["timestamp"] == timestamp4
+ assert result[b"key2"][1]["value"] == "ROCK"
+
def test__invalid_type_throws_exception(self):
_type = PBType({"string_type": {}})
value = PBValue({"int_value": 1})
@@ -712,4 +1692,4 @@ def test__invalid_type_throws_exception(self):
ValueError,
match="string_value field for String type not found in a Value.",
):
- _parse_pb_value_to_python_value(value._pb, metadata_type)
+ _parse_pb_value_to_python_value(value._pb, metadata_type, "string_field")
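
For orientation, here is a minimal standalone sketch (not the library code) of the decoding behavior these parser tests assert: a MAP column arrives as an array of [key, value] arrays, and an optional definition turns proto bytes into messages or enum numbers into names. The decode_map_column helper and its argument names are illustrative only.

    def decode_map_column(pairs, value_decoder=None):
        """pairs: iterable of (key, raw_value) pulled off the wire.

        value_decoder is optional; when omitted the raw value (serialized
        proto bytes or an enum's integer) is returned untouched, mirroring
        the "without proto definition" assertions above.
        """
        return {
            key: value_decoder(raw) if value_decoder else raw
            for key, raw in pairs
        }

    # Without a definition, values stay raw.
    assert decode_map_column([(1, b"raw-bytes")]) == {1: b"raw-bytes"}
    # With a definition, e.g. an enum lookup, values are translated.
    genre_names = {0: "POP", 1: "JAZZ"}
    assert decode_map_column([(1, 0), (2, 1)], genre_names.get) == {1: "POP", 2: "JAZZ"}
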
diff --git a/tests/unit/data/execute_query/test_query_result_row_reader.py b/tests/unit/data/execute_query/test_query_result_row_reader.py
index 6adb1e3c7..ab98b54bd 100644
--- a/tests/unit/data/execute_query/test_query_result_row_reader.py
+++ b/tests/unit/data/execute_query/test_query_result_row_reader.py
@@ -32,7 +32,9 @@
metadata,
proto_rows_bytes,
str_val,
+ bytes_val,
)
+from samples.testdata import singer_pb2
class TestQueryResultRowReader:
@@ -116,8 +118,8 @@ def test__received_values_are_passed_to_parser_in_batches(self):
reader.consume([proto_rows_bytes(int_val(1), int_val(2))], metadata)
parse_mock.assert_has_calls(
[
- mock.call(PBValue(int_val(1)), SqlType.Int64()),
- mock.call(PBValue(int_val(2)), SqlType.Int64()),
+ mock.call(PBValue(int_val(1)), SqlType.Int64(), "test1", None),
+ mock.call(PBValue(int_val(2)), SqlType.Int64(), "test2", None),
]
)
@@ -137,7 +139,7 @@ def test__parser_errors_are_forwarded(self):
parse_mock.assert_has_calls(
[
- mock.call(PBValue(values[0]), SqlType.Int64()),
+ mock.call(PBValue(values[0]), SqlType.Int64(), "test1", None),
]
)
@@ -243,6 +245,40 @@ def test_multiple_batches(self):
assert row4["test1"] == 7
assert row4["test2"] == 8
+ def test_multiple_batches_with_proto_and_enum_types(self):
+ singer1 = singer_pb2.Singer(name="John")
+ singer2 = singer_pb2.Singer(name="Taylor")
+ singer3 = singer_pb2.Singer(name="Jay")
+ singer4 = singer_pb2.Singer(name="Eric")
+
+ reader = _QueryResultRowReader()
+ batches = [
+ proto_rows_bytes(
+ bytes_val(singer1.SerializeToString()),
+ int_val(0),
+ bytes_val(singer2.SerializeToString()),
+ int_val(1),
+ ),
+ proto_rows_bytes(bytes_val(singer3.SerializeToString()), int_val(2)),
+ proto_rows_bytes(bytes_val(singer4.SerializeToString()), int_val(3)),
+ ]
+
+ results = reader.consume(
+ batches,
+ Metadata([("singer", SqlType.Proto()), ("genre", SqlType.Enum())]),
+ {"singer": singer_pb2.Singer(), "genre": singer_pb2.Genre},
+ )
+ assert len(results) == 4
+ [row1, row2, row3, row4] = results
+ assert row1["singer"] == singer1
+ assert row1["genre"] == "POP"
+ assert row2["singer"] == singer2
+ assert row2["genre"] == "JAZZ"
+ assert row3["singer"] == singer3
+ assert row3["genre"] == "FOLK"
+ assert row4["singer"] == singer4
+ assert row4["genre"] == "ROCK"
+
class TestMetadata:
def test__duplicate_column_names(self):
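
A rough sketch, outside the library, of how the new third argument to consume() could be applied per column, as the test above does when it passes {"singer": singer_pb2.Singer(), "genre": singer_pb2.Genre}: proto message instances act as templates for ParseFromString, and enum type wrappers translate numbers to names. The function name and structure here are illustrative, not the shipped implementation.

    def apply_column_info(column_name, raw_value, column_info=None):
        hint = (column_info or {}).get(column_name)
        if hint is None:
            return raw_value              # no definition: keep bytes / ints as-is
        if hasattr(hint, "ParseFromString"):
            message = type(hint)()        # hint is a template message instance
            message.ParseFromString(raw_value)
            return message
        return hint.Name(raw_value)       # hint is a proto enum type wrapper
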
diff --git a/tests/unit/data/test__helpers.py b/tests/unit/data/test__helpers.py
index 96c726a20..c8540024d 100644
--- a/tests/unit/data/test__helpers.py
+++ b/tests/unit/data/test__helpers.py
@@ -266,3 +266,98 @@ def test_get_retryable_errors(self, input_codes, input_table, expected):
setattr(fake_table, f"{key}_retryable_errors", input_table[key])
result = _helpers._get_retryable_errors(input_codes, fake_table)
assert result == expected
+
+
+class TestTrackedBackoffGenerator:
+ def test_tracked_backoff_generator_history(self):
+ """
+ Should be able to retrieve historical results from backoff generator
+ """
+ generator = _helpers.TrackedBackoffGenerator(
+ initial=0, multiplier=2, maximum=10
+ )
+ got_list = [next(generator) for _ in range(20)]
+
+ # check all values are correct
+ for i in range(19, -1, -1):
+ assert generator.get_attempt_backoff(i) == got_list[i]
+ # check a random value out of order
+ assert generator.get_attempt_backoff(5) == got_list[5]
+
+ @mock.patch("random.uniform", side_effect=lambda a, b: b)
+ def test_tracked_backoff_generator_defaults(self, mock_uniform):
+ """
+ Should generate values with default parameters
+
+ initial=0.01, multiplier=2, maximum=60
+ """
+ generator = _helpers.TrackedBackoffGenerator()
+ expected_values = [0.01, 0.02, 0.04, 0.08, 0.16]
+ for expected in expected_values:
+ assert next(generator) == pytest.approx(expected)
+
+ @mock.patch("random.uniform", side_effect=lambda a, b: b)
+ def test_tracked_backoff_generator_with_maximum(self, mock_uniform):
+ """
+ Should cap the backoff at the maximum value
+ """
+ generator = _helpers.TrackedBackoffGenerator(initial=1, multiplier=2, maximum=5)
+ expected_values = [1, 2, 4, 5, 5, 5]
+ for expected in expected_values:
+ assert next(generator) == expected
+
+ def test_get_attempt_backoff_out_of_bounds(self):
+ """
+ get_attempt_backoff should raise IndexError for out of bounds index
+ """
+ generator = _helpers.TrackedBackoffGenerator()
+ next(generator)
+ next(generator)
+ with pytest.raises(IndexError):
+ generator.get_attempt_backoff(2)
+ with pytest.raises(IndexError):
+ generator.get_attempt_backoff(-3)
+
+ def test_set_next_full_set(self):
+ """
+ try always using set_next to populate generator
+ """
+ generator = _helpers.TrackedBackoffGenerator()
+ for idx, val in enumerate(range(100, 0, -1)):
+ generator.set_next(val)
+ got = next(generator)
+ assert got == val
+ assert generator.get_attempt_backoff(idx) == val
+
+ def test_set_next_negative_value(self):
+ generator = _helpers.TrackedBackoffGenerator()
+ with pytest.raises(ValueError):
+ generator.set_next(-1)
+
+ @mock.patch("random.uniform", side_effect=lambda a, b: b)
+ def test_interleaved_set_next(self, mock_uniform):
+ import itertools
+
+ generator = _helpers.TrackedBackoffGenerator(
+ initial=1, multiplier=2, maximum=128
+ )
+ # values we expect generator to create
+ expected_values = [2**i for i in range(8)]
+ # values we will insert
+ inserted_values = [9, 61, 0, 4, 33, 12, 18, 2]
+ for idx in range(8):
+ assert next(generator) == expected_values[idx]
+ generator.set_next(inserted_values[idx])
+ assert next(generator) == inserted_values[idx]
+ # check to make sure history is as we expect
+ expected_history = list(
+ itertools.chain.from_iterable(zip(expected_values, inserted_values))
+ )
+ assert list(generator.history) == expected_history
+
+ @mock.patch("random.uniform", side_effect=lambda a, b: b)
+ def test_set_next_replacement(self, mock_uniform):
+ generator = _helpers.TrackedBackoffGenerator(initial=1)
+ generator.set_next(99)
+ generator.set_next(88)
+ assert next(generator) == 88
+ assert next(generator) == 1
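
The tests above pin down a fair amount of behavior; the class below is a standalone sketch of a generator that would satisfy them (exponential backoff with random.uniform jitter, a history list addressable by attempt index, and set_next() overriding only the next yielded value). It is illustrative only, not the _helpers.TrackedBackoffGenerator implementation.

    import random

    class SketchBackoffGenerator:
        """Illustrative stand-in; behavior inferred from the tests above."""

        def __init__(self, initial=0.01, multiplier=2, maximum=60):
            self.initial, self.multiplier, self.maximum = initial, multiplier, maximum
            self.history = []        # every yielded value, by attempt index
            self._override = None    # value queued by set_next()
            self._attempt = 0

        def set_next(self, value):
            if value < 0:
                raise ValueError("backoff value must be non-negative")
            self._override = value   # replaces any previously queued value

        def get_attempt_backoff(self, index):
            return self.history[index]   # raises IndexError when out of bounds

        def __next__(self):
            if self._override is not None:
                value, self._override = self._override, None
            else:
                cap = min(self.initial * self.multiplier**self._attempt, self.maximum)
                value = random.uniform(0, cap)
                self._attempt += 1
            self.history.append(value)
            return value
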
diff --git a/tests/unit/data/test_mutations.py b/tests/unit/data/test_mutations.py
index 485c86e42..17050162c 100644
--- a/tests/unit/data/test_mutations.py
+++ b/tests/unit/data/test_mutations.py
@@ -117,6 +117,17 @@ def test_size(self, test_dict):
{"delete_from_family": {"family_name": "foo"}},
),
(mutations.DeleteAllFromRow, {"delete_from_row": {}}),
+ (
+ mutations.AddToCell,
+ {
+ "add_to_cell": {
+ "family_name": "foo",
+ "column_qualifier": {"raw_value": b"bar"},
+ "timestamp": {"raw_timestamp_micros": 12345},
+ "input": {"int_value": 123},
+ }
+ },
+ ),
],
)
def test__from_dict(self, expected_class, input_dict):
@@ -162,6 +173,7 @@ def test__from_dict_wrong_subclass(self):
mutations.DeleteRangeFromColumn("foo", b"bar"),
mutations.DeleteAllFromFamily("foo"),
mutations.DeleteAllFromRow(),
+ mutations.AddToCell("foo", b"bar", 123, 456),
]
for instance in subclasses:
others = [other for other in subclasses if other != instance]
@@ -706,3 +718,105 @@ def test__from_dict(self):
assert len(instance.mutations) == 1
assert isinstance(instance.mutations[0], mutations.DeleteAllFromFamily)
assert instance.mutations[0].family_to_delete == "test_family"
+
+
+class TestAddToCell:
+ def _target_class(self):
+ from google.cloud.bigtable.data.mutations import AddToCell
+
+ return AddToCell
+
+ def _make_one(self, *args, **kwargs):
+ return self._target_class()(*args, **kwargs)
+
+ @pytest.mark.parametrize("input_val", [2**64, -(2**64)])
+ def test_ctor_large_int(self, input_val):
+ with pytest.raises(ValueError) as e:
+ self._make_one(
+ family="f", qualifier=b"b", value=input_val, timestamp_micros=123
+ )
+ assert "int values must be between" in str(e.value)
+
+ @pytest.mark.parametrize("input_val", ["", "a", "abc", "hello world!"])
+ def test_ctor_str_value(self, input_val):
+ with pytest.raises(TypeError) as e:
+ self._make_one(
+ family="f", qualifier=b"b", value=input_val, timestamp_micros=123
+ )
+ assert "value must be int" in str(e.value)
+
+ def test_ctor(self):
+ """Ensure constructor sets expected values"""
+ expected_family = "test-family"
+ expected_qualifier = b"test-qualifier"
+ expected_value = 1234
+ expected_timestamp = 1234567890
+ instance = self._make_one(
+ expected_family, expected_qualifier, expected_value, expected_timestamp
+ )
+ assert instance.family == expected_family
+ assert instance.qualifier == expected_qualifier
+ assert instance.value == expected_value
+ assert instance.timestamp == expected_timestamp
+
+ def test_ctor_negative_timestamp(self):
+ """Only non-negative timestamps are valid"""
+ with pytest.raises(ValueError) as e:
+ self._make_one("test-family", b"test-qualifier", 1234, -2)
+ assert "timestamp must be non-negative" in str(e.value)
+
+ def test__to_dict(self):
+ """ensure dict representation is as expected"""
+ expected_family = "test-family"
+ expected_qualifier = b"test-qualifier"
+ expected_value = 1234
+ expected_timestamp = 123456789
+ instance = self._make_one(
+ expected_family, expected_qualifier, expected_value, expected_timestamp
+ )
+ got_dict = instance._to_dict()
+ assert list(got_dict.keys()) == ["add_to_cell"]
+ got_inner_dict = got_dict["add_to_cell"]
+ assert got_inner_dict["family_name"] == expected_family
+ assert got_inner_dict["column_qualifier"]["raw_value"] == expected_qualifier
+ assert got_inner_dict["timestamp"]["raw_timestamp_micros"] == expected_timestamp
+ assert got_inner_dict["input"]["int_value"] == expected_value
+ assert len(got_inner_dict.keys()) == 4
+
+ def test__to_pb(self):
+ """ensure proto representation is as expected"""
+ import google.cloud.bigtable_v2.types.data as data_pb
+
+ expected_family = "test-family"
+ expected_qualifier = b"test-qualifier"
+ expected_value = 1234
+ expected_timestamp = 123456789
+ instance = self._make_one(
+ expected_family, expected_qualifier, expected_value, expected_timestamp
+ )
+ got_pb = instance._to_pb()
+ assert isinstance(got_pb, data_pb.Mutation)
+ assert got_pb.add_to_cell.family_name == expected_family
+ assert got_pb.add_to_cell.column_qualifier.raw_value == expected_qualifier
+ assert got_pb.add_to_cell.timestamp.raw_timestamp_micros == expected_timestamp
+ assert got_pb.add_to_cell.input.int_value == expected_value
+
+ @pytest.mark.parametrize(
+ "timestamp",
+ [
+ (1234567890),
+ (1),
+ (0),
+ ],
+ )
+ def test_is_idempotent(self, timestamp):
+ """is_idempotent is not based on the timestamp"""
+ instance = self._make_one("test-family", b"test-qualifier", 1234, timestamp)
+ assert not instance.is_idempotent()
+
+ def test___str__(self):
+ """Str representation of mutations should be to_dict"""
+ instance = self._make_one("test-family", b"test-qualifier", 1234, 1234567890)
+ str_value = instance.__str__()
+ dict_value = instance._to_dict()
+ assert str_value == str(dict_value)
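
A usage sketch of the mutation exercised above, with keyword names taken from these tests; exact signatures should be checked against the shipped mutations module.

    from google.cloud.bigtable.data.mutations import AddToCell

    # Aggregate mutation: adds an integer delta to a cell server-side.
    increment = AddToCell(
        family="test-family",          # column family of the aggregate cell
        qualifier=b"test-qualifier",   # column qualifier, as bytes
        value=1234,                    # int delta; 64-bit range enforced
        timestamp_micros=1234567890,   # must be non-negative
    )
    assert not increment.is_idempotent()   # never safe to blindly retry
    print(increment._to_dict())            # {"add_to_cell": {...}} wire shape
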
diff --git a/tests/unit/data/test_sync_up_to_date.py b/tests/unit/data/test_sync_up_to_date.py
index d4623a6c8..e6bce9cf6 100644
--- a/tests/unit/data/test_sync_up_to_date.py
+++ b/tests/unit/data/test_sync_up_to_date.py
@@ -90,7 +90,7 @@ def test_verify_headers(sync_file):
\#\ distributed\ under\ the\ License\ is\ distributed\ on\ an\ \"AS\ IS\"\ BASIS,\n
\#\ WITHOUT\ WARRANTIES\ OR\ CONDITIONS\ OF\ ANY\ KIND,\ either\ express\ or\ implied\.\n
\#\ See\ the\ License\ for\ the\ specific\ language\ governing\ permissions\ and\n
- \#\ limitations\ under\ the\ License\.
+ \#\ limitations\ under\ the\ License
"""
pattern = re.compile(license_regex, re.VERBOSE)
diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
index 2ad52bf52..b0ba35f0c 100644
--- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
+++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
@@ -182,12 +182,19 @@ def test__read_environment_variables():
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
- with pytest.raises(ValueError) as excinfo:
- BigtableInstanceAdminClient._read_environment_variables()
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with pytest.raises(ValueError) as excinfo:
+ BigtableInstanceAdminClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ else:
+ assert BigtableInstanceAdminClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
assert BigtableInstanceAdminClient._read_environment_variables() == (
@@ -226,6 +233,105 @@ def test__read_environment_variables():
)
+def test_use_client_cert_effective():
+ # Test case 1: Test when `should_use_client_cert` returns True.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=True
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+ # Test case 2: Test when `should_use_client_cert` returns False.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should NOT be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=False
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 3: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "true".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+ # Test case 4: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 5: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "True".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "True"}):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+ # Test case 6: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "False".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "False"}
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 7: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "TRUE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "TRUE"}):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+ # Test case 8: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "FALSE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "FALSE"}
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 9: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not set.
+ # In this case, the method should return False, which is the default value.
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, clear=True):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 10: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should raise a ValueError as the environment variable must be either
+ # "true" or "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ with pytest.raises(ValueError):
+ BigtableInstanceAdminClient._use_client_cert_effective()
+
+ # Test case 11: Test when `should_use_client_cert` is available and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should return False as the environment variable is set to an invalid value.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+ # Test case 12: Test when `should_use_client_cert` is available and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is unset. Also,
+ # the GOOGLE_API_CERTIFICATE_CONFIG environment variable is unset.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": ""}):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": ""}):
+ assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+
def test__get_client_cert_source():
mock_provided_cert_source = mock.Mock()
mock_default_cert_source = mock.Mock()
@@ -615,17 +721,6 @@ def test_bigtable_instance_admin_client_client_options(
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client = client_class(transport=transport_name)
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
@@ -861,6 +956,119 @@ def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client
assert api_endpoint == mock_api_endpoint
assert cert_source is None
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "Unsupported".
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset.
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", None)
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
+ # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset (empty).
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", "")
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
@@ -911,18 +1119,6 @@ def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client_class.get_mtls_endpoint_and_cert_source()
-
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
@pytest.mark.parametrize(
"client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient]
@@ -20687,6 +20883,7 @@ def test_partial_update_instance_rest_call_success(request_type):
"create_time": {"seconds": 751, "nanos": 543},
"satisfies_pzs": True,
"satisfies_pzi": True,
+ "tags": {},
}
# The version of a generated dependency at test runtime may differ from the version used during generation.
# Delete any fields which are not present in the current runtime dependency
@@ -25947,6 +26144,7 @@ def test_bigtable_instance_admin_grpc_asyncio_transport_channel():
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize(
"transport_class",
[
diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
index 67b4302c9..bff220693 100644
--- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
+++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
@@ -58,10 +58,10 @@
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports
@@ -138,45 +138,45 @@ def test__get_default_mtls_endpoint():
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
- assert BigtableTableAdminClient._get_default_mtls_endpoint(None) is None
+ assert BaseBigtableTableAdminClient._get_default_mtls_endpoint(None) is None
assert (
- BigtableTableAdminClient._get_default_mtls_endpoint(api_endpoint)
+ BaseBigtableTableAdminClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
- BigtableTableAdminClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ BaseBigtableTableAdminClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
- BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_endpoint)
+ BaseBigtableTableAdminClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
- BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ BaseBigtableTableAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
- BigtableTableAdminClient._get_default_mtls_endpoint(non_googleapi)
+ BaseBigtableTableAdminClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
def test__read_environment_variables():
- assert BigtableTableAdminClient._read_environment_variables() == (
+ assert BaseBigtableTableAdminClient._read_environment_variables() == (
False,
"auto",
None,
)
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
- assert BigtableTableAdminClient._read_environment_variables() == (
+ assert BaseBigtableTableAdminClient._read_environment_variables() == (
True,
"auto",
None,
)
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
- assert BigtableTableAdminClient._read_environment_variables() == (
+ assert BaseBigtableTableAdminClient._read_environment_variables() == (
False,
"auto",
None,
@@ -185,29 +185,36 @@ def test__read_environment_variables():
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
- with pytest.raises(ValueError) as excinfo:
- BigtableTableAdminClient._read_environment_variables()
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with pytest.raises(ValueError) as excinfo:
+ BaseBigtableTableAdminClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ else:
+ assert BaseBigtableTableAdminClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
- assert BigtableTableAdminClient._read_environment_variables() == (
+ assert BaseBigtableTableAdminClient._read_environment_variables() == (
False,
"never",
None,
)
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
- assert BigtableTableAdminClient._read_environment_variables() == (
+ assert BaseBigtableTableAdminClient._read_environment_variables() == (
False,
"always",
None,
)
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
- assert BigtableTableAdminClient._read_environment_variables() == (
+ assert BaseBigtableTableAdminClient._read_environment_variables() == (
False,
"auto",
None,
@@ -215,33 +222,134 @@ def test__read_environment_variables():
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError) as excinfo:
- BigtableTableAdminClient._read_environment_variables()
+ BaseBigtableTableAdminClient._read_environment_variables()
assert (
str(excinfo.value)
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
- assert BigtableTableAdminClient._read_environment_variables() == (
+ assert BaseBigtableTableAdminClient._read_environment_variables() == (
False,
"auto",
"foo.com",
)
+def test_use_client_cert_effective():
+ # Test case 1: Test when `should_use_client_cert` returns True.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=True
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is True
+
+ # Test case 2: Test when `should_use_client_cert` returns False.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should NOT be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=False
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 3: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "true".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is True
+
+ # Test case 4: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 5: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "True".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "True"}):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is True
+
+ # Test case 6: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "False".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "False"}
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 7: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "TRUE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "TRUE"}):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is True
+
+ # Test case 8: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "FALSE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "FALSE"}
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 9: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not set.
+ # In this case, the method should return False, which is the default value.
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, clear=True):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 10: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should raise a ValueError as the environment variable must be either
+ # "true" or "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ with pytest.raises(ValueError):
+ BaseBigtableTableAdminClient._use_client_cert_effective()
+
+ # Test case 11: Test when `should_use_client_cert` is available and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should return False as the environment variable is set to an invalid value.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+ # Test case 12: Test when `should_use_client_cert` is available and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is unset. Also,
+ # the GOOGLE_API_CERTIFICATE_CONFIG environment variable is unset.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": ""}):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": ""}):
+ assert (
+ BaseBigtableTableAdminClient._use_client_cert_effective() is False
+ )
+
+
def test__get_client_cert_source():
mock_provided_cert_source = mock.Mock()
mock_default_cert_source = mock.Mock()
- assert BigtableTableAdminClient._get_client_cert_source(None, False) is None
+ assert BaseBigtableTableAdminClient._get_client_cert_source(None, False) is None
assert (
- BigtableTableAdminClient._get_client_cert_source(
+ BaseBigtableTableAdminClient._get_client_cert_source(
mock_provided_cert_source, False
)
is None
)
assert (
- BigtableTableAdminClient._get_client_cert_source(
+ BaseBigtableTableAdminClient._get_client_cert_source(
mock_provided_cert_source, True
)
== mock_provided_cert_source
@@ -255,11 +363,11 @@ def test__get_client_cert_source():
return_value=mock_default_cert_source,
):
assert (
- BigtableTableAdminClient._get_client_cert_source(None, True)
+ BaseBigtableTableAdminClient._get_client_cert_source(None, True)
is mock_default_cert_source
)
assert (
- BigtableTableAdminClient._get_client_cert_source(
+ BaseBigtableTableAdminClient._get_client_cert_source(
mock_provided_cert_source, "true"
)
is mock_provided_cert_source
@@ -267,68 +375,72 @@ def test__get_client_cert_source():
@mock.patch.object(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
"_DEFAULT_ENDPOINT_TEMPLATE",
- modify_default_endpoint_template(BigtableTableAdminClient),
+ modify_default_endpoint_template(BaseBigtableTableAdminClient),
)
@mock.patch.object(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
"_DEFAULT_ENDPOINT_TEMPLATE",
- modify_default_endpoint_template(BigtableTableAdminAsyncClient),
+ modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient),
)
def test__get_api_endpoint():
api_override = "foo.com"
mock_client_cert_source = mock.Mock()
- default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE
- default_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE
+ default_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
UNIVERSE_DOMAIN=default_universe
)
mock_universe = "bar.com"
- mock_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ mock_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
UNIVERSE_DOMAIN=mock_universe
)
assert (
- BigtableTableAdminClient._get_api_endpoint(
+ BaseBigtableTableAdminClient._get_api_endpoint(
api_override, mock_client_cert_source, default_universe, "always"
)
== api_override
)
assert (
- BigtableTableAdminClient._get_api_endpoint(
+ BaseBigtableTableAdminClient._get_api_endpoint(
None, mock_client_cert_source, default_universe, "auto"
)
- == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT
+ == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT
)
assert (
- BigtableTableAdminClient._get_api_endpoint(None, None, default_universe, "auto")
+ BaseBigtableTableAdminClient._get_api_endpoint(
+ None, None, default_universe, "auto"
+ )
== default_endpoint
)
assert (
- BigtableTableAdminClient._get_api_endpoint(
+ BaseBigtableTableAdminClient._get_api_endpoint(
None, None, default_universe, "always"
)
- == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT
+ == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT
)
assert (
- BigtableTableAdminClient._get_api_endpoint(
+ BaseBigtableTableAdminClient._get_api_endpoint(
None, mock_client_cert_source, default_universe, "always"
)
- == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT
+ == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT
)
assert (
- BigtableTableAdminClient._get_api_endpoint(None, None, mock_universe, "never")
+ BaseBigtableTableAdminClient._get_api_endpoint(
+ None, None, mock_universe, "never"
+ )
== mock_endpoint
)
assert (
- BigtableTableAdminClient._get_api_endpoint(
+ BaseBigtableTableAdminClient._get_api_endpoint(
None, None, default_universe, "never"
)
== default_endpoint
)
with pytest.raises(MutualTLSChannelError) as excinfo:
- BigtableTableAdminClient._get_api_endpoint(
+ BaseBigtableTableAdminClient._get_api_endpoint(
None, mock_client_cert_source, mock_universe, "auto"
)
assert (
@@ -342,22 +454,22 @@ def test__get_universe_domain():
universe_domain_env = "bar.com"
assert (
- BigtableTableAdminClient._get_universe_domain(
+ BaseBigtableTableAdminClient._get_universe_domain(
client_universe_domain, universe_domain_env
)
== client_universe_domain
)
assert (
- BigtableTableAdminClient._get_universe_domain(None, universe_domain_env)
+ BaseBigtableTableAdminClient._get_universe_domain(None, universe_domain_env)
== universe_domain_env
)
assert (
- BigtableTableAdminClient._get_universe_domain(None, None)
- == BigtableTableAdminClient._DEFAULT_UNIVERSE
+ BaseBigtableTableAdminClient._get_universe_domain(None, None)
+ == BaseBigtableTableAdminClient._DEFAULT_UNIVERSE
)
with pytest.raises(ValueError) as excinfo:
- BigtableTableAdminClient._get_universe_domain("", None)
+ BaseBigtableTableAdminClient._get_universe_domain("", None)
assert str(excinfo.value) == "Universe Domain cannot be an empty string."
@@ -377,7 +489,7 @@ def test__get_universe_domain():
def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info):
cred = mock.Mock(["get_cred_info"])
cred.get_cred_info = mock.Mock(return_value=cred_info_json)
- client = BigtableTableAdminClient(credentials=cred)
+ client = BaseBigtableTableAdminClient(credentials=cred)
client._transport._credentials = cred
error = core_exceptions.GoogleAPICallError("message", details=["foo"])
@@ -394,7 +506,7 @@ def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_in
def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code):
cred = mock.Mock([])
assert not hasattr(cred, "get_cred_info")
- client = BigtableTableAdminClient(credentials=cred)
+ client = BaseBigtableTableAdminClient(credentials=cred)
client._transport._credentials = cred
error = core_exceptions.GoogleAPICallError("message", details=[])
@@ -407,12 +519,12 @@ def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code):
@pytest.mark.parametrize(
"client_class,transport_name",
[
- (BigtableTableAdminClient, "grpc"),
- (BigtableTableAdminAsyncClient, "grpc_asyncio"),
- (BigtableTableAdminClient, "rest"),
+ (BaseBigtableTableAdminClient, "grpc"),
+ (BaseBigtableTableAdminAsyncClient, "grpc_asyncio"),
+ (BaseBigtableTableAdminClient, "rest"),
],
)
-def test_bigtable_table_admin_client_from_service_account_info(
+def test_base_bigtable_table_admin_client_from_service_account_info(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
@@ -440,7 +552,7 @@ def test_bigtable_table_admin_client_from_service_account_info(
(transports.BigtableTableAdminRestTransport, "rest"),
],
)
-def test_bigtable_table_admin_client_service_account_always_use_jwt(
+def test_base_bigtable_table_admin_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
@@ -461,12 +573,12 @@ def test_bigtable_table_admin_client_service_account_always_use_jwt(
@pytest.mark.parametrize(
"client_class,transport_name",
[
- (BigtableTableAdminClient, "grpc"),
- (BigtableTableAdminAsyncClient, "grpc_asyncio"),
- (BigtableTableAdminClient, "rest"),
+ (BaseBigtableTableAdminClient, "grpc"),
+ (BaseBigtableTableAdminAsyncClient, "grpc_asyncio"),
+ (BaseBigtableTableAdminClient, "rest"),
],
)
-def test_bigtable_table_admin_client_from_service_account_file(
+def test_base_bigtable_table_admin_client_from_service_account_file(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
@@ -493,51 +605,59 @@ def test_bigtable_table_admin_client_from_service_account_file(
)
-def test_bigtable_table_admin_client_get_transport_class():
- transport = BigtableTableAdminClient.get_transport_class()
+def test_base_bigtable_table_admin_client_get_transport_class():
+ transport = BaseBigtableTableAdminClient.get_transport_class()
available_transports = [
transports.BigtableTableAdminGrpcTransport,
transports.BigtableTableAdminRestTransport,
]
assert transport in available_transports
- transport = BigtableTableAdminClient.get_transport_class("grpc")
+ transport = BaseBigtableTableAdminClient.get_transport_class("grpc")
assert transport == transports.BigtableTableAdminGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
- (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"),
(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminClient,
+ transports.BigtableTableAdminGrpcTransport,
+ "grpc",
+ ),
+ (
+ BaseBigtableTableAdminAsyncClient,
transports.BigtableTableAdminGrpcAsyncIOTransport,
"grpc_asyncio",
),
- (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"),
+ (
+ BaseBigtableTableAdminClient,
+ transports.BigtableTableAdminRestTransport,
+ "rest",
+ ),
],
)
@mock.patch.object(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
"_DEFAULT_ENDPOINT_TEMPLATE",
- modify_default_endpoint_template(BigtableTableAdminClient),
+ modify_default_endpoint_template(BaseBigtableTableAdminClient),
)
@mock.patch.object(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
"_DEFAULT_ENDPOINT_TEMPLATE",
- modify_default_endpoint_template(BigtableTableAdminAsyncClient),
+ modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient),
)
-def test_bigtable_table_admin_client_client_options(
+def test_base_bigtable_table_admin_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
- with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc:
+ with mock.patch.object(BaseBigtableTableAdminClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
- with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc:
+ with mock.patch.object(BaseBigtableTableAdminClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
@@ -606,17 +726,6 @@ def test_bigtable_table_admin_client_client_options(
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client = client_class(transport=transport_name)
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
@@ -661,37 +770,37 @@ def test_bigtable_table_admin_client_client_options(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
transports.BigtableTableAdminGrpcTransport,
"grpc",
"true",
),
(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
transports.BigtableTableAdminGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
transports.BigtableTableAdminGrpcTransport,
"grpc",
"false",
),
(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
transports.BigtableTableAdminGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
transports.BigtableTableAdminRestTransport,
"rest",
"true",
),
(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
transports.BigtableTableAdminRestTransport,
"rest",
"false",
@@ -699,17 +808,17 @@ def test_bigtable_table_admin_client_client_options(
],
)
@mock.patch.object(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
"_DEFAULT_ENDPOINT_TEMPLATE",
- modify_default_endpoint_template(BigtableTableAdminClient),
+ modify_default_endpoint_template(BaseBigtableTableAdminClient),
)
@mock.patch.object(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
"_DEFAULT_ENDPOINT_TEMPLATE",
- modify_default_endpoint_template(BigtableTableAdminAsyncClient),
+ modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
-def test_bigtable_table_admin_client_mtls_env_auto(
+def test_base_bigtable_table_admin_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
@@ -812,19 +921,21 @@ def test_bigtable_table_admin_client_mtls_env_auto(
@pytest.mark.parametrize(
- "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient]
+ "client_class", [BaseBigtableTableAdminClient, BaseBigtableTableAdminAsyncClient]
)
@mock.patch.object(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
"DEFAULT_ENDPOINT",
- modify_default_endpoint(BigtableTableAdminClient),
+ modify_default_endpoint(BaseBigtableTableAdminClient),
)
@mock.patch.object(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
"DEFAULT_ENDPOINT",
- modify_default_endpoint(BigtableTableAdminAsyncClient),
+ modify_default_endpoint(BaseBigtableTableAdminAsyncClient),
)
-def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_class):
+def test_base_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(
+ client_class,
+):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
@@ -852,6 +963,119 @@ def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_cl
assert api_endpoint == mock_api_endpoint
assert cert_source is None
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "Unsupported".
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset.
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", None)
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
+    # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset (empty).
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", "")
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
@@ -902,41 +1126,29 @@ def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_cl
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client_class.get_mtls_endpoint_and_cert_source()
-
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
@pytest.mark.parametrize(
- "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient]
+ "client_class", [BaseBigtableTableAdminClient, BaseBigtableTableAdminAsyncClient]
)
@mock.patch.object(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
"_DEFAULT_ENDPOINT_TEMPLATE",
- modify_default_endpoint_template(BigtableTableAdminClient),
+ modify_default_endpoint_template(BaseBigtableTableAdminClient),
)
@mock.patch.object(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
"_DEFAULT_ENDPOINT_TEMPLATE",
- modify_default_endpoint_template(BigtableTableAdminAsyncClient),
+ modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient),
)
-def test_bigtable_table_admin_client_client_api_endpoint(client_class):
+def test_base_bigtable_table_admin_client_client_api_endpoint(client_class):
mock_client_cert_source = client_cert_source_callback
api_override = "foo.com"
- default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE
- default_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE
+ default_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
UNIVERSE_DOMAIN=default_universe
)
mock_universe = "bar.com"
- mock_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
+ mock_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(
UNIVERSE_DOMAIN=mock_universe
)
@@ -1004,16 +1216,24 @@ def test_bigtable_table_admin_client_client_api_endpoint(client_class):
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
- (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"),
(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminClient,
+ transports.BigtableTableAdminGrpcTransport,
+ "grpc",
+ ),
+ (
+ BaseBigtableTableAdminAsyncClient,
transports.BigtableTableAdminGrpcAsyncIOTransport,
"grpc_asyncio",
),
- (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"),
+ (
+ BaseBigtableTableAdminClient,
+ transports.BigtableTableAdminRestTransport,
+ "rest",
+ ),
],
)
-def test_bigtable_table_admin_client_client_options_scopes(
+def test_base_bigtable_table_admin_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
@@ -1042,26 +1262,26 @@ def test_bigtable_table_admin_client_client_options_scopes(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
transports.BigtableTableAdminGrpcTransport,
"grpc",
grpc_helpers,
),
(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
transports.BigtableTableAdminGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
transports.BigtableTableAdminRestTransport,
"rest",
None,
),
],
)
-def test_bigtable_table_admin_client_client_options_credentials_file(
+def test_base_bigtable_table_admin_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
@@ -1085,12 +1305,12 @@ def test_bigtable_table_admin_client_client_options_credentials_file(
)
-def test_bigtable_table_admin_client_client_options_from_dict():
+def test_base_bigtable_table_admin_client_client_options_from_dict():
with mock.patch(
"google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
@@ -1110,20 +1330,20 @@ def test_bigtable_table_admin_client_client_options_from_dict():
"client_class,transport_class,transport_name,grpc_helpers",
[
(
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
transports.BigtableTableAdminGrpcTransport,
"grpc",
grpc_helpers,
),
(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
transports.BigtableTableAdminGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
-def test_bigtable_table_admin_client_create_channel_credentials_file(
+def test_base_bigtable_table_admin_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
@@ -1190,7 +1410,7 @@ def test_bigtable_table_admin_client_create_channel_credentials_file(
],
)
def test_create_table(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -1225,7 +1445,7 @@ def test_create_table(request_type, transport: str = "grpc"):
def test_create_table_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -1256,7 +1476,7 @@ def test_create_table_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -1294,7 +1514,7 @@ async def test_create_table_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -1334,7 +1554,7 @@ async def test_create_table_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.CreateTableRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -1374,7 +1594,7 @@ async def test_create_table_async_from_dict():
def test_create_table_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -1404,7 +1624,7 @@ def test_create_table_field_headers():
@pytest.mark.asyncio
async def test_create_table_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -1433,7 +1653,7 @@ async def test_create_table_field_headers_async():
def test_create_table_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -1465,7 +1685,7 @@ def test_create_table_flattened():
def test_create_table_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -1482,7 +1702,7 @@ def test_create_table_flattened_error():
@pytest.mark.asyncio
async def test_create_table_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -1517,7 +1737,7 @@ async def test_create_table_flattened_async():
@pytest.mark.asyncio
async def test_create_table_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -1540,7 +1760,7 @@ async def test_create_table_flattened_error_async():
],
)
def test_create_table_from_snapshot(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -1570,7 +1790,7 @@ def test_create_table_from_snapshot(request_type, transport: str = "grpc"):
def test_create_table_from_snapshot_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -1605,7 +1825,7 @@ def test_create_table_from_snapshot_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -1653,7 +1873,7 @@ async def test_create_table_from_snapshot_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -1698,7 +1918,7 @@ async def test_create_table_from_snapshot_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.CreateTableFromSnapshotRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -1733,7 +1953,7 @@ async def test_create_table_from_snapshot_async_from_dict():
def test_create_table_from_snapshot_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -1765,7 +1985,7 @@ def test_create_table_from_snapshot_field_headers():
@pytest.mark.asyncio
async def test_create_table_from_snapshot_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -1798,7 +2018,7 @@ async def test_create_table_from_snapshot_field_headers_async():
def test_create_table_from_snapshot_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -1832,7 +2052,7 @@ def test_create_table_from_snapshot_flattened():
def test_create_table_from_snapshot_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -1849,7 +2069,7 @@ def test_create_table_from_snapshot_flattened_error():
@pytest.mark.asyncio
async def test_create_table_from_snapshot_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -1888,7 +2108,7 @@ async def test_create_table_from_snapshot_flattened_async():
@pytest.mark.asyncio
async def test_create_table_from_snapshot_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -1911,7 +2131,7 @@ async def test_create_table_from_snapshot_flattened_error_async():
],
)
def test_list_tables(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -1942,7 +2162,7 @@ def test_list_tables(request_type, transport: str = "grpc"):
def test_list_tables_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -1973,7 +2193,7 @@ def test_list_tables_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -2011,7 +2231,7 @@ async def test_list_tables_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -2050,7 +2270,7 @@ async def test_list_tables_async_use_cached_wrapped_rpc(
async def test_list_tables_async(
transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -2086,7 +2306,7 @@ async def test_list_tables_async_from_dict():
def test_list_tables_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -2116,7 +2336,7 @@ def test_list_tables_field_headers():
@pytest.mark.asyncio
async def test_list_tables_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -2147,7 +2367,7 @@ async def test_list_tables_field_headers_async():
def test_list_tables_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -2171,7 +2391,7 @@ def test_list_tables_flattened():
def test_list_tables_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -2186,7 +2406,7 @@ def test_list_tables_flattened_error():
@pytest.mark.asyncio
async def test_list_tables_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -2215,7 +2435,7 @@ async def test_list_tables_flattened_async():
@pytest.mark.asyncio
async def test_list_tables_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -2229,7 +2449,7 @@ async def test_list_tables_flattened_error_async():
def test_list_tables_pager(transport_name: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
@@ -2283,7 +2503,7 @@ def test_list_tables_pager(transport_name: str = "grpc"):
def test_list_tables_pages(transport_name: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
@@ -2325,7 +2545,7 @@ def test_list_tables_pages(transport_name: str = "grpc"):
@pytest.mark.asyncio
async def test_list_tables_async_pager():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -2375,7 +2595,7 @@ async def test_list_tables_async_pager():
@pytest.mark.asyncio
async def test_list_tables_async_pages():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -2430,7 +2650,7 @@ async def test_list_tables_async_pages():
],
)
def test_get_table(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -2465,7 +2685,7 @@ def test_get_table(request_type, transport: str = "grpc"):
def test_get_table_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -2494,7 +2714,7 @@ def test_get_table_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -2530,7 +2750,7 @@ async def test_get_table_async_use_cached_wrapped_rpc(transport: str = "grpc_asy
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -2569,7 +2789,7 @@ async def test_get_table_async_use_cached_wrapped_rpc(transport: str = "grpc_asy
async def test_get_table_async(
transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -2609,7 +2829,7 @@ async def test_get_table_async_from_dict():
def test_get_table_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -2639,7 +2859,7 @@ def test_get_table_field_headers():
@pytest.mark.asyncio
async def test_get_table_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -2668,7 +2888,7 @@ async def test_get_table_field_headers_async():
def test_get_table_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -2692,7 +2912,7 @@ def test_get_table_flattened():
def test_get_table_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -2707,7 +2927,7 @@ def test_get_table_flattened_error():
@pytest.mark.asyncio
async def test_get_table_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -2734,7 +2954,7 @@ async def test_get_table_flattened_async():
@pytest.mark.asyncio
async def test_get_table_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -2755,7 +2975,7 @@ async def test_get_table_flattened_error_async():
],
)
def test_update_table(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -2783,7 +3003,7 @@ def test_update_table(request_type, transport: str = "grpc"):
def test_update_table_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -2808,7 +3028,7 @@ def test_update_table_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -2851,7 +3071,7 @@ async def test_update_table_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -2896,7 +3116,7 @@ async def test_update_table_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.UpdateTableRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -2929,7 +3149,7 @@ async def test_update_table_async_from_dict():
def test_update_table_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -2959,7 +3179,7 @@ def test_update_table_field_headers():
@pytest.mark.asyncio
async def test_update_table_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -2990,7 +3210,7 @@ async def test_update_table_field_headers_async():
def test_update_table_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -3018,7 +3238,7 @@ def test_update_table_flattened():
def test_update_table_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -3034,7 +3254,7 @@ def test_update_table_flattened_error():
@pytest.mark.asyncio
async def test_update_table_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -3067,7 +3287,7 @@ async def test_update_table_flattened_async():
@pytest.mark.asyncio
async def test_update_table_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -3089,7 +3309,7 @@ async def test_update_table_flattened_error_async():
],
)
def test_delete_table(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -3117,7 +3337,7 @@ def test_delete_table(request_type, transport: str = "grpc"):
def test_delete_table_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -3146,7 +3366,7 @@ def test_delete_table_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -3184,7 +3404,7 @@ async def test_delete_table_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -3224,7 +3444,7 @@ async def test_delete_table_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.DeleteTableRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -3255,7 +3475,7 @@ async def test_delete_table_async_from_dict():
def test_delete_table_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -3285,7 +3505,7 @@ def test_delete_table_field_headers():
@pytest.mark.asyncio
async def test_delete_table_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -3314,7 +3534,7 @@ async def test_delete_table_field_headers_async():
def test_delete_table_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -3338,7 +3558,7 @@ def test_delete_table_flattened():
def test_delete_table_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -3353,7 +3573,7 @@ def test_delete_table_flattened_error():
@pytest.mark.asyncio
async def test_delete_table_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -3380,7 +3600,7 @@ async def test_delete_table_flattened_async():
@pytest.mark.asyncio
async def test_delete_table_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -3401,7 +3621,7 @@ async def test_delete_table_flattened_error_async():
],
)
def test_undelete_table(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -3429,7 +3649,7 @@ def test_undelete_table(request_type, transport: str = "grpc"):
def test_undelete_table_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -3458,7 +3678,7 @@ def test_undelete_table_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -3501,7 +3721,7 @@ async def test_undelete_table_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -3546,7 +3766,7 @@ async def test_undelete_table_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.UndeleteTableRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -3579,7 +3799,7 @@ async def test_undelete_table_async_from_dict():
def test_undelete_table_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -3609,7 +3829,7 @@ def test_undelete_table_field_headers():
@pytest.mark.asyncio
async def test_undelete_table_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -3640,7 +3860,7 @@ async def test_undelete_table_field_headers_async():
def test_undelete_table_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -3664,7 +3884,7 @@ def test_undelete_table_flattened():
def test_undelete_table_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -3679,7 +3899,7 @@ def test_undelete_table_flattened_error():
@pytest.mark.asyncio
async def test_undelete_table_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -3708,7 +3928,7 @@ async def test_undelete_table_flattened_async():
@pytest.mark.asyncio
async def test_undelete_table_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -3729,7 +3949,7 @@ async def test_undelete_table_flattened_error_async():
],
)
def test_create_authorized_view(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -3759,7 +3979,7 @@ def test_create_authorized_view(request_type, transport: str = "grpc"):
def test_create_authorized_view_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -3792,7 +4012,7 @@ def test_create_authorized_view_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -3840,7 +4060,7 @@ async def test_create_authorized_view_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -3885,7 +4105,7 @@ async def test_create_authorized_view_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.CreateAuthorizedViewRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -3920,7 +4140,7 @@ async def test_create_authorized_view_async_from_dict():
def test_create_authorized_view_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -3952,7 +4172,7 @@ def test_create_authorized_view_field_headers():
@pytest.mark.asyncio
async def test_create_authorized_view_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -3985,7 +4205,7 @@ async def test_create_authorized_view_field_headers_async():
def test_create_authorized_view_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -4019,7 +4239,7 @@ def test_create_authorized_view_flattened():
def test_create_authorized_view_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -4036,7 +4256,7 @@ def test_create_authorized_view_flattened_error():
@pytest.mark.asyncio
async def test_create_authorized_view_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -4075,7 +4295,7 @@ async def test_create_authorized_view_flattened_async():
@pytest.mark.asyncio
async def test_create_authorized_view_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -4098,7 +4318,7 @@ async def test_create_authorized_view_flattened_error_async():
],
)
def test_list_authorized_views(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -4131,7 +4351,7 @@ def test_list_authorized_views(request_type, transport: str = "grpc"):
def test_list_authorized_views_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -4164,7 +4384,7 @@ def test_list_authorized_views_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -4207,7 +4427,7 @@ async def test_list_authorized_views_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -4247,7 +4467,7 @@ async def test_list_authorized_views_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.ListAuthorizedViewsRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -4285,7 +4505,7 @@ async def test_list_authorized_views_async_from_dict():
def test_list_authorized_views_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -4317,7 +4537,7 @@ def test_list_authorized_views_field_headers():
@pytest.mark.asyncio
async def test_list_authorized_views_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -4350,7 +4570,7 @@ async def test_list_authorized_views_field_headers_async():
def test_list_authorized_views_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -4376,7 +4596,7 @@ def test_list_authorized_views_flattened():
def test_list_authorized_views_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -4391,7 +4611,7 @@ def test_list_authorized_views_flattened_error():
@pytest.mark.asyncio
async def test_list_authorized_views_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -4422,7 +4642,7 @@ async def test_list_authorized_views_flattened_async():
@pytest.mark.asyncio
async def test_list_authorized_views_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -4436,7 +4656,7 @@ async def test_list_authorized_views_flattened_error_async():
def test_list_authorized_views_pager(transport_name: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
@@ -4492,7 +4712,7 @@ def test_list_authorized_views_pager(transport_name: str = "grpc"):
def test_list_authorized_views_pages(transport_name: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
@@ -4536,7 +4756,7 @@ def test_list_authorized_views_pages(transport_name: str = "grpc"):
@pytest.mark.asyncio
async def test_list_authorized_views_async_pager():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -4588,7 +4808,7 @@ async def test_list_authorized_views_async_pager():
@pytest.mark.asyncio
async def test_list_authorized_views_async_pages():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -4645,7 +4865,7 @@ async def test_list_authorized_views_async_pages():
],
)
def test_get_authorized_view(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -4682,7 +4902,7 @@ def test_get_authorized_view(request_type, transport: str = "grpc"):
def test_get_authorized_view_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -4713,7 +4933,7 @@ def test_get_authorized_view_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -4755,7 +4975,7 @@ async def test_get_authorized_view_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -4795,7 +5015,7 @@ async def test_get_authorized_view_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.GetAuthorizedViewRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -4837,7 +5057,7 @@ async def test_get_authorized_view_async_from_dict():
def test_get_authorized_view_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -4869,7 +5089,7 @@ def test_get_authorized_view_field_headers():
@pytest.mark.asyncio
async def test_get_authorized_view_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -4902,7 +5122,7 @@ async def test_get_authorized_view_field_headers_async():
def test_get_authorized_view_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -4928,7 +5148,7 @@ def test_get_authorized_view_flattened():
def test_get_authorized_view_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -4943,7 +5163,7 @@ def test_get_authorized_view_flattened_error():
@pytest.mark.asyncio
async def test_get_authorized_view_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -4974,7 +5194,7 @@ async def test_get_authorized_view_flattened_async():
@pytest.mark.asyncio
async def test_get_authorized_view_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -4995,7 +5215,7 @@ async def test_get_authorized_view_flattened_error_async():
],
)
def test_update_authorized_view(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -5025,7 +5245,7 @@ def test_update_authorized_view(request_type, transport: str = "grpc"):
def test_update_authorized_view_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -5052,7 +5272,7 @@ def test_update_authorized_view_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -5100,7 +5320,7 @@ async def test_update_authorized_view_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -5145,7 +5365,7 @@ async def test_update_authorized_view_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.UpdateAuthorizedViewRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -5180,7 +5400,7 @@ async def test_update_authorized_view_async_from_dict():
def test_update_authorized_view_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -5212,7 +5432,7 @@ def test_update_authorized_view_field_headers():
@pytest.mark.asyncio
async def test_update_authorized_view_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -5245,7 +5465,7 @@ async def test_update_authorized_view_field_headers_async():
def test_update_authorized_view_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -5275,7 +5495,7 @@ def test_update_authorized_view_flattened():
def test_update_authorized_view_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -5291,7 +5511,7 @@ def test_update_authorized_view_flattened_error():
@pytest.mark.asyncio
async def test_update_authorized_view_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -5326,7 +5546,7 @@ async def test_update_authorized_view_flattened_async():
@pytest.mark.asyncio
async def test_update_authorized_view_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -5348,7 +5568,7 @@ async def test_update_authorized_view_flattened_error_async():
],
)
def test_delete_authorized_view(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -5378,7 +5598,7 @@ def test_delete_authorized_view(request_type, transport: str = "grpc"):
def test_delete_authorized_view_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -5411,7 +5631,7 @@ def test_delete_authorized_view_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -5454,7 +5674,7 @@ async def test_delete_authorized_view_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -5494,7 +5714,7 @@ async def test_delete_authorized_view_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.DeleteAuthorizedViewRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -5527,7 +5747,7 @@ async def test_delete_authorized_view_async_from_dict():
def test_delete_authorized_view_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -5559,7 +5779,7 @@ def test_delete_authorized_view_field_headers():
@pytest.mark.asyncio
async def test_delete_authorized_view_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -5590,7 +5810,7 @@ async def test_delete_authorized_view_field_headers_async():
def test_delete_authorized_view_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -5616,7 +5836,7 @@ def test_delete_authorized_view_flattened():
def test_delete_authorized_view_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -5631,7 +5851,7 @@ def test_delete_authorized_view_flattened_error():
@pytest.mark.asyncio
async def test_delete_authorized_view_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -5660,7 +5880,7 @@ async def test_delete_authorized_view_flattened_async():
@pytest.mark.asyncio
async def test_delete_authorized_view_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -5681,7 +5901,7 @@ async def test_delete_authorized_view_flattened_error_async():
],
)
def test_modify_column_families(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -5718,7 +5938,7 @@ def test_modify_column_families(request_type, transport: str = "grpc"):
def test_modify_column_families_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -5749,7 +5969,7 @@ def test_modify_column_families_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -5792,7 +6012,7 @@ async def test_modify_column_families_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -5832,7 +6052,7 @@ async def test_modify_column_families_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.ModifyColumnFamiliesRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -5874,7 +6094,7 @@ async def test_modify_column_families_async_from_dict():
def test_modify_column_families_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -5906,7 +6126,7 @@ def test_modify_column_families_field_headers():
@pytest.mark.asyncio
async def test_modify_column_families_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -5937,7 +6157,7 @@ async def test_modify_column_families_field_headers_async():
def test_modify_column_families_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -5973,7 +6193,7 @@ def test_modify_column_families_flattened():
def test_modify_column_families_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -5993,7 +6213,7 @@ def test_modify_column_families_flattened_error():
@pytest.mark.asyncio
async def test_modify_column_families_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -6032,7 +6252,7 @@ async def test_modify_column_families_flattened_async():
@pytest.mark.asyncio
async def test_modify_column_families_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -6058,7 +6278,7 @@ async def test_modify_column_families_flattened_error_async():
],
)
def test_drop_row_range(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -6086,7 +6306,7 @@ def test_drop_row_range(request_type, transport: str = "grpc"):
def test_drop_row_range_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -6115,7 +6335,7 @@ def test_drop_row_range_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -6153,7 +6373,7 @@ async def test_drop_row_range_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -6193,7 +6413,7 @@ async def test_drop_row_range_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.DropRowRangeRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -6224,7 +6444,7 @@ async def test_drop_row_range_async_from_dict():
def test_drop_row_range_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -6254,7 +6474,7 @@ def test_drop_row_range_field_headers():
@pytest.mark.asyncio
async def test_drop_row_range_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -6290,7 +6510,7 @@ async def test_drop_row_range_field_headers_async():
],
)
def test_generate_consistency_token(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -6323,7 +6543,7 @@ def test_generate_consistency_token(request_type, transport: str = "grpc"):
def test_generate_consistency_token_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -6354,7 +6574,7 @@ def test_generate_consistency_token_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -6397,7 +6617,7 @@ async def test_generate_consistency_token_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -6437,7 +6657,7 @@ async def test_generate_consistency_token_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.GenerateConsistencyTokenRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -6475,7 +6695,7 @@ async def test_generate_consistency_token_async_from_dict():
def test_generate_consistency_token_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -6507,7 +6727,7 @@ def test_generate_consistency_token_field_headers():
@pytest.mark.asyncio
async def test_generate_consistency_token_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -6540,7 +6760,7 @@ async def test_generate_consistency_token_field_headers_async():
def test_generate_consistency_token_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -6566,7 +6786,7 @@ def test_generate_consistency_token_flattened():
def test_generate_consistency_token_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -6581,7 +6801,7 @@ def test_generate_consistency_token_flattened_error():
@pytest.mark.asyncio
async def test_generate_consistency_token_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -6612,7 +6832,7 @@ async def test_generate_consistency_token_flattened_async():
@pytest.mark.asyncio
async def test_generate_consistency_token_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -6633,7 +6853,7 @@ async def test_generate_consistency_token_flattened_error_async():
],
)
def test_check_consistency(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -6666,7 +6886,7 @@ def test_check_consistency(request_type, transport: str = "grpc"):
def test_check_consistency_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -6699,7 +6919,7 @@ def test_check_consistency_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -6739,7 +6959,7 @@ async def test_check_consistency_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -6779,7 +6999,7 @@ async def test_check_consistency_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.CheckConsistencyRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -6817,7 +7037,7 @@ async def test_check_consistency_async_from_dict():
def test_check_consistency_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -6849,7 +7069,7 @@ def test_check_consistency_field_headers():
@pytest.mark.asyncio
async def test_check_consistency_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -6882,7 +7102,7 @@ async def test_check_consistency_field_headers_async():
def test_check_consistency_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -6912,7 +7132,7 @@ def test_check_consistency_flattened():
def test_check_consistency_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -6928,7 +7148,7 @@ def test_check_consistency_flattened_error():
@pytest.mark.asyncio
async def test_check_consistency_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -6963,7 +7183,7 @@ async def test_check_consistency_flattened_async():
@pytest.mark.asyncio
async def test_check_consistency_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -6985,7 +7205,7 @@ async def test_check_consistency_flattened_error_async():
],
)
def test_snapshot_table(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -7013,7 +7233,7 @@ def test_snapshot_table(request_type, transport: str = "grpc"):
def test_snapshot_table_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -7048,7 +7268,7 @@ def test_snapshot_table_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -7091,7 +7311,7 @@ async def test_snapshot_table_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -7136,7 +7356,7 @@ async def test_snapshot_table_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.SnapshotTableRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -7169,7 +7389,7 @@ async def test_snapshot_table_async_from_dict():
def test_snapshot_table_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -7199,7 +7419,7 @@ def test_snapshot_table_field_headers():
@pytest.mark.asyncio
async def test_snapshot_table_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -7230,7 +7450,7 @@ async def test_snapshot_table_field_headers_async():
def test_snapshot_table_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -7266,7 +7486,7 @@ def test_snapshot_table_flattened():
def test_snapshot_table_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -7284,7 +7504,7 @@ def test_snapshot_table_flattened_error():
@pytest.mark.asyncio
async def test_snapshot_table_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -7325,7 +7545,7 @@ async def test_snapshot_table_flattened_async():
@pytest.mark.asyncio
async def test_snapshot_table_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -7349,7 +7569,7 @@ async def test_snapshot_table_flattened_error_async():
],
)
def test_get_snapshot(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -7386,7 +7606,7 @@ def test_get_snapshot(request_type, transport: str = "grpc"):
def test_get_snapshot_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -7415,7 +7635,7 @@ def test_get_snapshot_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -7453,7 +7673,7 @@ async def test_get_snapshot_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -7493,7 +7713,7 @@ async def test_get_snapshot_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.GetSnapshotRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -7535,7 +7755,7 @@ async def test_get_snapshot_async_from_dict():
def test_get_snapshot_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -7565,7 +7785,7 @@ def test_get_snapshot_field_headers():
@pytest.mark.asyncio
async def test_get_snapshot_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -7594,7 +7814,7 @@ async def test_get_snapshot_field_headers_async():
def test_get_snapshot_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -7618,7 +7838,7 @@ def test_get_snapshot_flattened():
def test_get_snapshot_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -7633,7 +7853,7 @@ def test_get_snapshot_flattened_error():
@pytest.mark.asyncio
async def test_get_snapshot_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -7660,7 +7880,7 @@ async def test_get_snapshot_flattened_async():
@pytest.mark.asyncio
async def test_get_snapshot_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -7681,7 +7901,7 @@ async def test_get_snapshot_flattened_error_async():
],
)
def test_list_snapshots(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -7712,7 +7932,7 @@ def test_list_snapshots(request_type, transport: str = "grpc"):
def test_list_snapshots_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -7743,7 +7963,7 @@ def test_list_snapshots_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -7781,7 +8001,7 @@ async def test_list_snapshots_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -7821,7 +8041,7 @@ async def test_list_snapshots_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.ListSnapshotsRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -7857,7 +8077,7 @@ async def test_list_snapshots_async_from_dict():
def test_list_snapshots_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -7887,7 +8107,7 @@ def test_list_snapshots_field_headers():
@pytest.mark.asyncio
async def test_list_snapshots_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -7918,7 +8138,7 @@ async def test_list_snapshots_field_headers_async():
def test_list_snapshots_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -7942,7 +8162,7 @@ def test_list_snapshots_flattened():
def test_list_snapshots_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -7957,7 +8177,7 @@ def test_list_snapshots_flattened_error():
@pytest.mark.asyncio
async def test_list_snapshots_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -7986,7 +8206,7 @@ async def test_list_snapshots_flattened_async():
@pytest.mark.asyncio
async def test_list_snapshots_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -8000,7 +8220,7 @@ async def test_list_snapshots_flattened_error_async():
def test_list_snapshots_pager(transport_name: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
@@ -8054,7 +8274,7 @@ def test_list_snapshots_pager(transport_name: str = "grpc"):
def test_list_snapshots_pages(transport_name: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
@@ -8096,7 +8316,7 @@ def test_list_snapshots_pages(transport_name: str = "grpc"):
@pytest.mark.asyncio
async def test_list_snapshots_async_pager():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -8146,7 +8366,7 @@ async def test_list_snapshots_async_pager():
@pytest.mark.asyncio
async def test_list_snapshots_async_pages():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -8201,7 +8421,7 @@ async def test_list_snapshots_async_pages():
],
)
def test_delete_snapshot(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -8229,7 +8449,7 @@ def test_delete_snapshot(request_type, transport: str = "grpc"):
def test_delete_snapshot_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -8258,7 +8478,7 @@ def test_delete_snapshot_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -8296,7 +8516,7 @@ async def test_delete_snapshot_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -8336,7 +8556,7 @@ async def test_delete_snapshot_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.DeleteSnapshotRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -8367,7 +8587,7 @@ async def test_delete_snapshot_async_from_dict():
def test_delete_snapshot_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -8397,7 +8617,7 @@ def test_delete_snapshot_field_headers():
@pytest.mark.asyncio
async def test_delete_snapshot_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -8426,7 +8646,7 @@ async def test_delete_snapshot_field_headers_async():
def test_delete_snapshot_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -8450,7 +8670,7 @@ def test_delete_snapshot_flattened():
def test_delete_snapshot_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -8465,7 +8685,7 @@ def test_delete_snapshot_flattened_error():
@pytest.mark.asyncio
async def test_delete_snapshot_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -8492,7 +8712,7 @@ async def test_delete_snapshot_flattened_async():
@pytest.mark.asyncio
async def test_delete_snapshot_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -8513,7 +8733,7 @@ async def test_delete_snapshot_flattened_error_async():
],
)
def test_create_backup(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -8541,7 +8761,7 @@ def test_create_backup(request_type, transport: str = "grpc"):
def test_create_backup_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -8572,7 +8792,7 @@ def test_create_backup_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -8615,7 +8835,7 @@ async def test_create_backup_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -8660,7 +8880,7 @@ async def test_create_backup_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.CreateBackupRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -8693,7 +8913,7 @@ async def test_create_backup_async_from_dict():
def test_create_backup_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -8723,7 +8943,7 @@ def test_create_backup_field_headers():
@pytest.mark.asyncio
async def test_create_backup_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -8754,7 +8974,7 @@ async def test_create_backup_field_headers_async():
def test_create_backup_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -8786,7 +9006,7 @@ def test_create_backup_flattened():
def test_create_backup_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -8803,7 +9023,7 @@ def test_create_backup_flattened_error():
@pytest.mark.asyncio
async def test_create_backup_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -8840,7 +9060,7 @@ async def test_create_backup_flattened_async():
@pytest.mark.asyncio
async def test_create_backup_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -8863,7 +9083,7 @@ async def test_create_backup_flattened_error_async():
],
)
def test_get_backup(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -8904,7 +9124,7 @@ def test_get_backup(request_type, transport: str = "grpc"):
def test_get_backup_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -8933,7 +9153,7 @@ def test_get_backup_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -8969,7 +9189,7 @@ async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_as
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -9008,7 +9228,7 @@ async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_as
async def test_get_backup_async(
transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -9054,7 +9274,7 @@ async def test_get_backup_async_from_dict():
def test_get_backup_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -9084,7 +9304,7 @@ def test_get_backup_field_headers():
@pytest.mark.asyncio
async def test_get_backup_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -9113,7 +9333,7 @@ async def test_get_backup_field_headers_async():
def test_get_backup_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -9137,7 +9357,7 @@ def test_get_backup_flattened():
def test_get_backup_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -9152,7 +9372,7 @@ def test_get_backup_flattened_error():
@pytest.mark.asyncio
async def test_get_backup_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -9179,7 +9399,7 @@ async def test_get_backup_flattened_async():
@pytest.mark.asyncio
async def test_get_backup_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -9200,7 +9420,7 @@ async def test_get_backup_flattened_error_async():
],
)
def test_update_backup(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -9241,7 +9461,7 @@ def test_update_backup(request_type, transport: str = "grpc"):
def test_update_backup_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -9266,7 +9486,7 @@ def test_update_backup_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -9304,7 +9524,7 @@ async def test_update_backup_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -9344,7 +9564,7 @@ async def test_update_backup_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.UpdateBackupRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -9390,7 +9610,7 @@ async def test_update_backup_async_from_dict():
def test_update_backup_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -9420,7 +9640,7 @@ def test_update_backup_field_headers():
@pytest.mark.asyncio
async def test_update_backup_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -9449,7 +9669,7 @@ async def test_update_backup_field_headers_async():
def test_update_backup_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -9477,7 +9697,7 @@ def test_update_backup_flattened():
def test_update_backup_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -9493,7 +9713,7 @@ def test_update_backup_flattened_error():
@pytest.mark.asyncio
async def test_update_backup_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -9524,7 +9744,7 @@ async def test_update_backup_flattened_async():
@pytest.mark.asyncio
async def test_update_backup_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -9546,7 +9766,7 @@ async def test_update_backup_flattened_error_async():
],
)
def test_delete_backup(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -9574,7 +9794,7 @@ def test_delete_backup(request_type, transport: str = "grpc"):
def test_delete_backup_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -9603,7 +9823,7 @@ def test_delete_backup_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -9641,7 +9861,7 @@ async def test_delete_backup_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -9681,7 +9901,7 @@ async def test_delete_backup_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.DeleteBackupRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -9712,7 +9932,7 @@ async def test_delete_backup_async_from_dict():
def test_delete_backup_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -9742,7 +9962,7 @@ def test_delete_backup_field_headers():
@pytest.mark.asyncio
async def test_delete_backup_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -9771,7 +9991,7 @@ async def test_delete_backup_field_headers_async():
def test_delete_backup_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -9795,7 +10015,7 @@ def test_delete_backup_flattened():
def test_delete_backup_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -9810,7 +10030,7 @@ def test_delete_backup_flattened_error():
@pytest.mark.asyncio
async def test_delete_backup_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -9837,7 +10057,7 @@ async def test_delete_backup_flattened_async():
@pytest.mark.asyncio
async def test_delete_backup_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -9858,7 +10078,7 @@ async def test_delete_backup_flattened_error_async():
],
)
def test_list_backups(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -9889,7 +10109,7 @@ def test_list_backups(request_type, transport: str = "grpc"):
def test_list_backups_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -9924,7 +10144,7 @@ def test_list_backups_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -9962,7 +10182,7 @@ async def test_list_backups_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -10002,7 +10222,7 @@ async def test_list_backups_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.ListBackupsRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -10038,7 +10258,7 @@ async def test_list_backups_async_from_dict():
def test_list_backups_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -10068,7 +10288,7 @@ def test_list_backups_field_headers():
@pytest.mark.asyncio
async def test_list_backups_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -10099,7 +10319,7 @@ async def test_list_backups_field_headers_async():
def test_list_backups_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -10123,7 +10343,7 @@ def test_list_backups_flattened():
def test_list_backups_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -10138,7 +10358,7 @@ def test_list_backups_flattened_error():
@pytest.mark.asyncio
async def test_list_backups_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -10167,7 +10387,7 @@ async def test_list_backups_flattened_async():
@pytest.mark.asyncio
async def test_list_backups_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -10181,7 +10401,7 @@ async def test_list_backups_flattened_error_async():
def test_list_backups_pager(transport_name: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
@@ -10235,7 +10455,7 @@ def test_list_backups_pager(transport_name: str = "grpc"):
def test_list_backups_pages(transport_name: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
@@ -10277,7 +10497,7 @@ def test_list_backups_pages(transport_name: str = "grpc"):
@pytest.mark.asyncio
async def test_list_backups_async_pager():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -10327,7 +10547,7 @@ async def test_list_backups_async_pager():
@pytest.mark.asyncio
async def test_list_backups_async_pages():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -10381,8 +10601,8 @@ async def test_list_backups_async_pages():
dict,
],
)
-def test_restore_table(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+def test__restore_table(request_type, transport: str = "grpc"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -10395,7 +10615,7 @@ def test_restore_table(request_type, transport: str = "grpc"):
with mock.patch.object(type(client.transport.restore_table), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
- response = client.restore_table(request)
+ response = client._restore_table(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
@@ -10407,10 +10627,10 @@ def test_restore_table(request_type, transport: str = "grpc"):
assert isinstance(response, future.Future)
-def test_restore_table_non_empty_request_with_auto_populated_field():
+def test__restore_table_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -10429,7 +10649,7 @@ def test_restore_table_non_empty_request_with_auto_populated_field():
call.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client.restore_table(request=request)
+ client._restore_table(request=request)
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == bigtable_table_admin.RestoreTableRequest(
@@ -10439,11 +10659,11 @@ def test_restore_table_non_empty_request_with_auto_populated_field():
)
-def test_restore_table_use_cached_wrapped_rpc():
+def test__restore_table_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -10462,7 +10682,7 @@ def test_restore_table_use_cached_wrapped_rpc():
)
client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc
request = {}
- client.restore_table(request)
+ client._restore_table(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
@@ -10472,7 +10692,7 @@ def test_restore_table_use_cached_wrapped_rpc():
# Subsequent calls should use the cached wrapper
wrapper_fn.reset_mock()
- client.restore_table(request)
+ client._restore_table(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
@@ -10480,13 +10700,13 @@ def test_restore_table_use_cached_wrapped_rpc():
@pytest.mark.asyncio
-async def test_restore_table_async_use_cached_wrapped_rpc(
+async def test__restore_table_async_use_cached_wrapped_rpc(
transport: str = "grpc_asyncio",
):
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -10509,7 +10729,7 @@ async def test_restore_table_async_use_cached_wrapped_rpc(
] = mock_rpc
request = {}
- await client.restore_table(request)
+ await client._restore_table(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
@@ -10519,7 +10739,7 @@ async def test_restore_table_async_use_cached_wrapped_rpc(
# Subsequent calls should use the cached wrapper
wrapper_fn.reset_mock()
- await client.restore_table(request)
+ await client._restore_table(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
@@ -10527,11 +10747,11 @@ async def test_restore_table_async_use_cached_wrapped_rpc(
@pytest.mark.asyncio
-async def test_restore_table_async(
+async def test__restore_table_async(
transport: str = "grpc_asyncio",
request_type=bigtable_table_admin.RestoreTableRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -10546,7 +10766,7 @@ async def test_restore_table_async(
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
- response = await client.restore_table(request)
+ response = await client._restore_table(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
@@ -10559,12 +10779,12 @@ async def test_restore_table_async(
@pytest.mark.asyncio
-async def test_restore_table_async_from_dict():
- await test_restore_table_async(request_type=dict)
+async def test__restore_table_async_from_dict():
+ await test__restore_table_async(request_type=dict)
-def test_restore_table_field_headers():
- client = BigtableTableAdminClient(
+def test__restore_table_field_headers():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -10577,7 +10797,7 @@ def test_restore_table_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.restore_table), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
- client.restore_table(request)
+ client._restore_table(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
@@ -10593,8 +10813,8 @@ def test_restore_table_field_headers():
@pytest.mark.asyncio
-async def test_restore_table_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+async def test__restore_table_field_headers_async():
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -10609,7 +10829,7 @@ async def test_restore_table_field_headers_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
- await client.restore_table(request)
+ await client._restore_table(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
@@ -10632,7 +10852,7 @@ async def test_restore_table_field_headers_async():
],
)
def test_copy_backup(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -10660,7 +10880,7 @@ def test_copy_backup(request_type, transport: str = "grpc"):
def test_copy_backup_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -10693,7 +10913,7 @@ def test_copy_backup_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -10736,7 +10956,7 @@ async def test_copy_backup_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -10780,7 +11000,7 @@ async def test_copy_backup_async_use_cached_wrapped_rpc(
async def test_copy_backup_async(
transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -10813,7 +11033,7 @@ async def test_copy_backup_async_from_dict():
def test_copy_backup_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -10843,7 +11063,7 @@ def test_copy_backup_field_headers():
@pytest.mark.asyncio
async def test_copy_backup_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -10874,7 +11094,7 @@ async def test_copy_backup_field_headers_async():
def test_copy_backup_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -10910,7 +11130,7 @@ def test_copy_backup_flattened():
def test_copy_backup_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -10928,7 +11148,7 @@ def test_copy_backup_flattened_error():
@pytest.mark.asyncio
async def test_copy_backup_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -10969,7 +11189,7 @@ async def test_copy_backup_flattened_async():
@pytest.mark.asyncio
async def test_copy_backup_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -10993,7 +11213,7 @@ async def test_copy_backup_flattened_error_async():
],
)
def test_get_iam_policy(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -11026,7 +11246,7 @@ def test_get_iam_policy(request_type, transport: str = "grpc"):
def test_get_iam_policy_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -11055,7 +11275,7 @@ def test_get_iam_policy_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -11093,7 +11313,7 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -11132,7 +11352,7 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc(
async def test_get_iam_policy_async(
transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -11170,7 +11390,7 @@ async def test_get_iam_policy_async_from_dict():
def test_get_iam_policy_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -11200,7 +11420,7 @@ def test_get_iam_policy_field_headers():
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -11229,7 +11449,7 @@ async def test_get_iam_policy_field_headers_async():
def test_get_iam_policy_from_dict_foreign():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -11246,7 +11466,7 @@ def test_get_iam_policy_from_dict_foreign():
def test_get_iam_policy_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -11270,7 +11490,7 @@ def test_get_iam_policy_flattened():
def test_get_iam_policy_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -11285,7 +11505,7 @@ def test_get_iam_policy_flattened_error():
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -11312,7 +11532,7 @@ async def test_get_iam_policy_flattened_async():
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -11333,7 +11553,7 @@ async def test_get_iam_policy_flattened_error_async():
],
)
def test_set_iam_policy(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -11366,7 +11586,7 @@ def test_set_iam_policy(request_type, transport: str = "grpc"):
def test_set_iam_policy_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -11395,7 +11615,7 @@ def test_set_iam_policy_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -11433,7 +11653,7 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -11472,7 +11692,7 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc(
async def test_set_iam_policy_async(
transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -11510,7 +11730,7 @@ async def test_set_iam_policy_async_from_dict():
def test_set_iam_policy_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -11540,7 +11760,7 @@ def test_set_iam_policy_field_headers():
@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -11569,7 +11789,7 @@ async def test_set_iam_policy_field_headers_async():
def test_set_iam_policy_from_dict_foreign():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -11587,7 +11807,7 @@ def test_set_iam_policy_from_dict_foreign():
def test_set_iam_policy_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -11611,7 +11831,7 @@ def test_set_iam_policy_flattened():
def test_set_iam_policy_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -11626,7 +11846,7 @@ def test_set_iam_policy_flattened_error():
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -11653,7 +11873,7 @@ async def test_set_iam_policy_flattened_async():
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -11674,7 +11894,7 @@ async def test_set_iam_policy_flattened_error_async():
],
)
def test_test_iam_permissions(request_type, transport: str = "grpc"):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -11707,7 +11927,7 @@ def test_test_iam_permissions(request_type, transport: str = "grpc"):
def test_test_iam_permissions_non_empty_request_with_auto_populated_field():
# This test is a coverage failsafe to make sure that UUID4 fields are
# automatically populated, according to AIP-4235, with non-empty requests.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -11738,7 +11958,7 @@ def test_test_iam_permissions_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -11780,7 +12000,7 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc(
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -11820,7 +12040,7 @@ async def test_test_iam_permissions_async(
transport: str = "grpc_asyncio",
request_type=iam_policy_pb2.TestIamPermissionsRequest,
):
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
transport=transport,
)
@@ -11858,7 +12078,7 @@ async def test_test_iam_permissions_async_from_dict():
def test_test_iam_permissions_field_headers():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -11890,7 +12110,7 @@ def test_test_iam_permissions_field_headers():
@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -11923,7 +12143,7 @@ async def test_test_iam_permissions_field_headers_async():
def test_test_iam_permissions_from_dict_foreign():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -11942,7 +12162,7 @@ def test_test_iam_permissions_from_dict_foreign():
def test_test_iam_permissions_flattened():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -11972,7 +12192,7 @@ def test_test_iam_permissions_flattened():
def test_test_iam_permissions_flattened_error():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
@@ -11988,7 +12208,7 @@ def test_test_iam_permissions_flattened_error():
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -12023,7 +12243,7 @@ async def test_test_iam_permissions_flattened_async():
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_error_async():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(),
)
@@ -12037,13 +12257,80 @@ async def test_test_iam_permissions_flattened_error_async():
)
-def test_create_table_rest_use_cached_wrapped_rpc():
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.CreateSchemaBundleRequest,
+ dict,
+ ],
+)
+def test_create_schema_bundle(request_type, transport: str = "grpc"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.create_schema_bundle(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = bigtable_table_admin.CreateSchemaBundleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
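(As a usage sketch only, not part of the generated tests: the mocked stub above returns a long-running operation, so a real call would typically be driven to completion by blocking on the returned future. The project/instance/table path and bundle id below are illustrative assumptions.)

```python
# Illustrative usage sketch; assumes a BaseBigtableTableAdminClient built
# with real credentials and a real table as the parent resource.
operation = client.create_schema_bundle(
    parent="projects/my-project/instances/my-instance/tables/my-table",
    schema_bundle_id="my-bundle",
    schema_bundle=table.SchemaBundle(name="name_value"),
)
# create_schema_bundle returns a long-running-operation future; block until
# the backend finishes and hands back the created SchemaBundle.
schema_bundle = operation.result()
```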
+def test_create_schema_bundle_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = bigtable_table_admin.CreateSchemaBundleRequest(
+ parent="parent_value",
+ schema_bundle_id="schema_bundle_id_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_schema_bundle), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.create_schema_bundle(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_table_admin.CreateSchemaBundleRequest(
+ parent="parent_value",
+ schema_bundle_id="schema_bundle_id_value",
+ )
+
+
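(The auto-populated-field tests above are coverage failsafes; the behaviour they guard is, roughly, that UUID4-annotated request fields left empty by the caller are filled in before the RPC is sent. A minimal sketch of that idea follows; the `request_id` field and helper are purely hypothetical and are not declared on CreateSchemaBundleRequest.)

```python
# Sketch only; the request_id field name and helper are illustrative,
# not part of bigtable_table_admin.
import uuid

def _maybe_autopopulate(request, uuid4_fields=("request_id",)):
    # Fill UUID4-format fields only when the caller left them unset.
    for field in uuid4_fields:
        if hasattr(request, field) and not getattr(request, field):
            setattr(request, field, str(uuid.uuid4()))
    return request
```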
+def test_create_schema_bundle_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ transport="grpc",
)
# Should wrap all calls on client creation
@@ -12051,194 +12338,361 @@ def test_create_table_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.create_table in client._transport._wrapped_methods
+ assert (
+ client._transport.create_schema_bundle in client._transport._wrapped_methods
+ )
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.create_table] = mock_rpc
-
+ client._transport._wrapped_methods[
+ client._transport.create_schema_bundle
+ ] = mock_rpc
request = {}
- client.create_table(request)
+ client.create_schema_bundle(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.create_table(request)
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_schema_bundle(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
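(The cached-wrapped-RPC tests check that the client wraps each transport method once, via `_prep_wrapped_messages`, and then reuses the wrapped callable instead of calling `wrap_method` on every invocation. A minimal sketch of that pattern, with illustrative names rather than the generated transport's internals:)

```python
# Sketch of the wrap-once-then-reuse pattern; _TransportSketch and its
# attributes are illustrative, not the generated transport class.
from google.api_core.gapic_v1 import method

class _TransportSketch:
    def __init__(self, stubs):
        # stubs: mapping of RPC name -> bare gRPC callable. Wrapping happens
        # exactly once, at construction time.
        self._wrapped_methods = {
            name: method.wrap_method(fn, default_timeout=60.0)
            for name, fn in stubs.items()
        }

    def __call__(self, name, request, **kwargs):
        # The per-call path only looks up the cached wrapper; wrap_method is
        # never invoked again, which is what wrapper_fn.call_count == 0 asserts.
        return self._wrapped_methods[name](request, **kwargs)
```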
-def test_create_table_rest_required_fields(
- request_type=bigtable_table_admin.CreateTableRequest,
+@pytest.mark.asyncio
+async def test_create_schema_bundle_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
):
- transport_class = transports.BigtableTableAdminRestTransport
-
- request_init = {}
- request_init["parent"] = ""
- request_init["table_id"] = ""
- request = request_type(**request_init)
- pb_request = request_type.pb(request)
- jsonified_request = json.loads(
- json_format.MessageToJson(pb_request, use_integers_for_enums=False)
- )
-
- # verify fields with default values are dropped
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).create_table._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
- # verify required fields with default values are now present
+ # Ensure method has been cached
+ assert (
+ client._client._transport.create_schema_bundle
+ in client._client._transport._wrapped_methods
+ )
- jsonified_request["parent"] = "parent_value"
- jsonified_request["tableId"] = "table_id_value"
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.create_schema_bundle
+ ] = mock_rpc
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).create_table._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
+ request = {}
+ await client.create_schema_bundle(request)
- # verify required fields with non-default values are left alone
- assert "parent" in jsonified_request
- assert jsonified_request["parent"] == "parent_value"
- assert "tableId" in jsonified_request
- assert jsonified_request["tableId"] == "table_id_value"
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
- )
- request = request_type(**request_init)
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
- # Designate an appropriate value for the returned response.
- return_value = gba_table.Table()
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # We need to mock transcode() because providing default values
- # for required fields will fail the real version if the http_options
- # expect actual values for those fields.
- with mock.patch.object(path_template, "transcode") as transcode:
- # A uri without fields and an empty body will force all the
- # request fields to show up in the query_params.
- pb_request = request_type.pb(request)
- transcode_result = {
- "uri": "v1/sample_method",
- "method": "post",
- "query_params": pb_request,
- }
- transcode_result["body"] = pb_request
- transcode.return_value = transcode_result
+ await client.create_schema_bundle(request)
- response_value = Response()
- response_value.status_code = 200
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
- # Convert return value to protobuf type
- return_value = gba_table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(return_value)
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+@pytest.mark.asyncio
+async def test_create_schema_bundle_async(
+ transport: str = "grpc_asyncio",
+ request_type=bigtable_table_admin.CreateSchemaBundleRequest,
+):
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
- response = client.create_table(request)
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
- expected_params = [("$alt", "json;enum-encoding=int")]
- actual_params = req.call_args.kwargs["params"]
- assert expected_params == actual_params
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.create_schema_bundle(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = bigtable_table_admin.CreateSchemaBundleRequest()
+ assert args[0] == request
-def test_create_table_rest_unset_required_fields():
- transport = transports.BigtableTableAdminRestTransport(
- credentials=ga_credentials.AnonymousCredentials
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_create_schema_bundle_async_from_dict():
+ await test_create_schema_bundle_async(request_type=dict)
+
+
+def test_create_schema_bundle_field_headers():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
)
- unset_fields = transport.create_table._get_unset_required_fields({})
- assert set(unset_fields) == (
- set(())
- & set(
- (
- "parent",
- "tableId",
- "table",
- )
- )
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.CreateSchemaBundleRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_schema_bundle(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
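(The field-headers tests assert that URI-bound request fields are mirrored into gRPC metadata as `x-goog-request-params`. As a sketch of where that tuple comes from, using `google.api_core`'s routing-header helper with an illustrative value:)

```python
# Sketch only: building the routing metadata the tests look for.
from google.api_core.gapic_v1 import routing_header

metadata = [routing_header.to_grpc_metadata([("parent", "parent_value")])]
# metadata now contains ("x-goog-request-params", "parent=parent_value"),
# the pair the field-headers tests assert is present in kw["metadata"].
```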
+@pytest.mark.asyncio
+async def test_create_schema_bundle_field_headers_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
)
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.CreateSchemaBundleRequest()
+
+ request.parent = "parent_value"
-def test_create_table_rest_flattened():
- client = BigtableTableAdminClient(
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.create_schema_bundle(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_create_schema_bundle_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
)
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(type(client.transport._session), "request") as req:
- # Designate an appropriate value for the returned response.
- return_value = gba_table.Table()
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_schema_bundle(
+ parent="parent_value",
+ schema_bundle_id="schema_bundle_id_value",
+ schema_bundle=table.SchemaBundle(name="name_value"),
+ )
- # get arguments that satisfy an http rule for this method
- sample_request = {"parent": "projects/sample1/instances/sample2"}
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].schema_bundle_id
+ mock_val = "schema_bundle_id_value"
+ assert arg == mock_val
+ arg = args[0].schema_bundle
+ mock_val = table.SchemaBundle(name="name_value")
+ assert arg == mock_val
- # get truthy value for each flattened field
- mock_args = dict(
+
+def test_create_schema_bundle_flattened_error():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_schema_bundle(
+ bigtable_table_admin.CreateSchemaBundleRequest(),
parent="parent_value",
- table_id="table_id_value",
- table=gba_table.Table(name="name_value"),
+ schema_bundle_id="schema_bundle_id_value",
+ schema_bundle=table.SchemaBundle(name="name_value"),
)
- mock_args.update(sample_request)
- # Wrap the value into a proper Response obj
- response_value = Response()
- response_value.status_code = 200
- # Convert return value to protobuf type
- return_value = gba_table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(return_value)
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.create_table(**mock_args)
+@pytest.mark.asyncio
+async def test_create_schema_bundle_flattened_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_schema_bundle(
+ parent="parent_value",
+ schema_bundle_id="schema_bundle_id_value",
+ schema_bundle=table.SchemaBundle(name="name_value"),
+ )
# Establish that the underlying call was made with the expected
# request object values.
- assert len(req.mock_calls) == 1
- _, args, _ = req.mock_calls[0]
- assert path_template.validate(
- "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host,
- args[1],
- )
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].schema_bundle_id
+ mock_val = "schema_bundle_id_value"
+ assert arg == mock_val
+ arg = args[0].schema_bundle
+ mock_val = table.SchemaBundle(name="name_value")
+ assert arg == mock_val
-def test_create_table_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
+@pytest.mark.asyncio
+async def test_create_schema_bundle_flattened_error_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.create_table(
- bigtable_table_admin.CreateTableRequest(),
+ await client.create_schema_bundle(
+ bigtable_table_admin.CreateSchemaBundleRequest(),
parent="parent_value",
- table_id="table_id_value",
- table=gba_table.Table(name="name_value"),
+ schema_bundle_id="schema_bundle_id_value",
+ schema_bundle=table.SchemaBundle(name="name_value"),
)
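(The flattened/flattened_error pairs above pin down the calling convention: a method accepts either a request object or individual flattened fields, and mixing the two raises ValueError. A short usage sketch, assuming a client constructed as in the surrounding tests:)

```python
# Sketch only; either form is fine on its own, combining them is not.
request = bigtable_table_admin.CreateSchemaBundleRequest(
    parent="parent_value",
    schema_bundle_id="schema_bundle_id_value",
)
client.create_schema_bundle(request=request)  # request-object form

client.create_schema_bundle(                  # flattened form
    parent="parent_value",
    schema_bundle_id="schema_bundle_id_value",
    schema_bundle=table.SchemaBundle(name="name_value"),
)

# Passing both at once raises ValueError:
# client.create_schema_bundle(request, parent="parent_value")
```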
-def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc():
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.UpdateSchemaBundleRequest,
+ dict,
+ ],
+)
+def test_update_schema_bundle(request_type, transport: str = "grpc"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.update_schema_bundle(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = bigtable_table_admin.UpdateSchemaBundleRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_update_schema_bundle_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = bigtable_table_admin.UpdateSchemaBundleRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_schema_bundle), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.update_schema_bundle(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_table_admin.UpdateSchemaBundleRequest()
+
+
+def test_update_schema_bundle_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ transport="grpc",
)
# Should wrap all calls on client creation
@@ -12247,8 +12701,7 @@ def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc():
# Ensure method has been cached
assert (
- client._transport.create_table_from_snapshot
- in client._transport._wrapped_methods
+ client._transport.update_schema_bundle in client._transport._wrapped_methods
)
# Replace cached wrapped function with mock
@@ -12257,449 +12710,350 @@ def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc():
"foo" # operation_request.operation in compute client(s) expect a string.
)
client._transport._wrapped_methods[
- client._transport.create_table_from_snapshot
+ client._transport.update_schema_bundle
] = mock_rpc
-
request = {}
- client.create_table_from_snapshot(request)
+ client.update_schema_bundle(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- # Operation methods build a cached wrapper on first rpc call
- # subsequent calls should use the cached wrapper
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
wrapper_fn.reset_mock()
- client.create_table_from_snapshot(request)
+ client.update_schema_bundle(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_create_table_from_snapshot_rest_required_fields(
- request_type=bigtable_table_admin.CreateTableFromSnapshotRequest,
+@pytest.mark.asyncio
+async def test_update_schema_bundle_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
):
- transport_class = transports.BigtableTableAdminRestTransport
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
- request_init = {}
- request_init["parent"] = ""
- request_init["table_id"] = ""
- request_init["source_snapshot"] = ""
- request = request_type(**request_init)
- pb_request = request_type.pb(request)
- jsonified_request = json.loads(
- json_format.MessageToJson(pb_request, use_integers_for_enums=False)
- )
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
- # verify fields with default values are dropped
+ # Ensure method has been cached
+ assert (
+ client._client._transport.update_schema_bundle
+ in client._client._transport._wrapped_methods
+ )
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).create_table_from_snapshot._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.update_schema_bundle
+ ] = mock_rpc
- # verify required fields with default values are now present
+ request = {}
+ await client.update_schema_bundle(request)
- jsonified_request["parent"] = "parent_value"
- jsonified_request["tableId"] = "table_id_value"
- jsonified_request["sourceSnapshot"] = "source_snapshot_value"
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).create_table_from_snapshot._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
+ # Operation methods call wrapper_fn to build a cached
+ # client._transport.operations_client instance on first rpc call.
+ # Subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
- # verify required fields with non-default values are left alone
- assert "parent" in jsonified_request
- assert jsonified_request["parent"] == "parent_value"
- assert "tableId" in jsonified_request
- assert jsonified_request["tableId"] == "table_id_value"
- assert "sourceSnapshot" in jsonified_request
- assert jsonified_request["sourceSnapshot"] == "source_snapshot_value"
+ await client.update_schema_bundle(request)
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_update_schema_bundle_async(
+ transport: str = "grpc_asyncio",
+ request_type=bigtable_table_admin.UpdateSchemaBundleRequest,
+):
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
)
- request = request_type(**request_init)
- # Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # We need to mock transcode() because providing default values
- # for required fields will fail the real version if the http_options
- # expect actual values for those fields.
- with mock.patch.object(path_template, "transcode") as transcode:
- # A uri without fields and an empty body will force all the
- # request fields to show up in the query_params.
- pb_request = request_type.pb(request)
- transcode_result = {
- "uri": "v1/sample_method",
- "method": "post",
- "query_params": pb_request,
- }
- transcode_result["body"] = pb_request
- transcode.return_value = transcode_result
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
- response_value = Response()
- response_value.status_code = 200
- json_return_value = json_format.MessageToJson(return_value)
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.update_schema_bundle(request)
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = bigtable_table_admin.UpdateSchemaBundleRequest()
+ assert args[0] == request
- response = client.create_table_from_snapshot(request)
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
- expected_params = [("$alt", "json;enum-encoding=int")]
- actual_params = req.call_args.kwargs["params"]
- assert expected_params == actual_params
+@pytest.mark.asyncio
+async def test_update_schema_bundle_async_from_dict():
+ await test_update_schema_bundle_async(request_type=dict)
-def test_create_table_from_snapshot_rest_unset_required_fields():
- transport = transports.BigtableTableAdminRestTransport(
- credentials=ga_credentials.AnonymousCredentials
- )
- unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({})
- assert set(unset_fields) == (
- set(())
- & set(
- (
- "parent",
- "tableId",
- "sourceSnapshot",
- )
- )
+def test_update_schema_bundle_field_headers():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
)
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.UpdateSchemaBundleRequest()
-def test_create_table_from_snapshot_rest_flattened():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ request.schema_bundle.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_schema_bundle(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "schema_bundle.name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_update_schema_bundle_field_headers_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
)
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(type(client.transport._session), "request") as req:
- # Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.UpdateSchemaBundleRequest()
- # get arguments that satisfy an http rule for this method
- sample_request = {"parent": "projects/sample1/instances/sample2"}
+ request.schema_bundle.name = "name_value"
- # get truthy value for each flattened field
- mock_args = dict(
- parent="parent_value",
- table_id="table_id_value",
- source_snapshot="source_snapshot_value",
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
)
- mock_args.update(sample_request)
+ await client.update_schema_bundle(request)
- # Wrap the value into a proper Response obj
- response_value = Response()
- response_value.status_code = 200
- json_return_value = json_format.MessageToJson(return_value)
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
- client.create_table_from_snapshot(**mock_args)
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "schema_bundle.name=name_value",
+ ) in kw["metadata"]
+
+
+def test_update_schema_bundle_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.update_schema_bundle(
+ schema_bundle=table.SchemaBundle(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
# Establish that the underlying call was made with the expected
# request object values.
- assert len(req.mock_calls) == 1
- _, args, _ = req.mock_calls[0]
- assert path_template.validate(
- "%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot"
- % client.transport._host,
- args[1],
- )
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].schema_bundle
+ mock_val = table.SchemaBundle(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
-def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_update_schema_bundle_flattened_error():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.create_table_from_snapshot(
- bigtable_table_admin.CreateTableFromSnapshotRequest(),
- parent="parent_value",
- table_id="table_id_value",
- source_snapshot="source_snapshot_value",
+ client.update_schema_bundle(
+ bigtable_table_admin.UpdateSchemaBundleRequest(),
+ schema_bundle=table.SchemaBundle(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
-def test_list_tables_rest_use_cached_wrapped_rpc():
- # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
- # instead of constructing them on each call
- with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
- )
-
- # Should wrap all calls on client creation
- assert wrapper_fn.call_count > 0
- wrapper_fn.reset_mock()
+@pytest.mark.asyncio
+async def test_update_schema_bundle_flattened_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
- # Ensure method has been cached
- assert client._transport.list_tables in client._transport._wrapped_methods
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
- # Replace cached wrapped function with mock
- mock_rpc = mock.Mock()
- mock_rpc.return_value.name = (
- "foo" # operation_request.operation in compute client(s) expect a string.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_schema_bundle(
+ schema_bundle=table.SchemaBundle(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
- client._transport._wrapped_methods[client._transport.list_tables] = mock_rpc
-
- request = {}
- client.list_tables(request)
- # Establish that the underlying gRPC stub method was called.
- assert mock_rpc.call_count == 1
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].schema_bundle
+ mock_val = table.SchemaBundle(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
- client.list_tables(request)
- # Establish that a new wrapper was not created for this call
- assert wrapper_fn.call_count == 0
- assert mock_rpc.call_count == 2
+@pytest.mark.asyncio
+async def test_update_schema_bundle_flattened_error_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
-
-def test_list_tables_rest_required_fields(
- request_type=bigtable_table_admin.ListTablesRequest,
-):
- transport_class = transports.BigtableTableAdminRestTransport
-
- request_init = {}
- request_init["parent"] = ""
- request = request_type(**request_init)
- pb_request = request_type.pb(request)
- jsonified_request = json.loads(
- json_format.MessageToJson(pb_request, use_integers_for_enums=False)
- )
-
- # verify fields with default values are dropped
-
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).list_tables._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
-
- # verify required fields with default values are now present
-
- jsonified_request["parent"] = "parent_value"
-
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).list_tables._get_unset_required_fields(jsonified_request)
- # Check that path parameters and body parameters are not mixing in.
- assert not set(unset_fields) - set(
- (
- "page_size",
- "page_token",
- "view",
- )
- )
- jsonified_request.update(unset_fields)
-
- # verify required fields with non-default values are left alone
- assert "parent" in jsonified_request
- assert jsonified_request["parent"] == "parent_value"
-
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
- )
- request = request_type(**request_init)
-
- # Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListTablesResponse()
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # We need to mock transcode() because providing default values
- # for required fields will fail the real version if the http_options
- # expect actual values for those fields.
- with mock.patch.object(path_template, "transcode") as transcode:
- # A uri without fields and an empty body will force all the
- # request fields to show up in the query_params.
- pb_request = request_type.pb(request)
- transcode_result = {
- "uri": "v1/sample_method",
- "method": "get",
- "query_params": pb_request,
- }
- transcode.return_value = transcode_result
-
- response_value = Response()
- response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(return_value)
-
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
-
- response = client.list_tables(request)
-
- expected_params = [("$alt", "json;enum-encoding=int")]
- actual_params = req.call_args.kwargs["params"]
- assert expected_params == actual_params
-
-
-def test_list_tables_rest_unset_required_fields():
- transport = transports.BigtableTableAdminRestTransport(
- credentials=ga_credentials.AnonymousCredentials
- )
-
- unset_fields = transport.list_tables._get_unset_required_fields({})
- assert set(unset_fields) == (
- set(
- (
- "pageSize",
- "pageToken",
- "view",
- )
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_schema_bundle(
+ bigtable_table_admin.UpdateSchemaBundleRequest(),
+ schema_bundle=table.SchemaBundle(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
- & set(("parent",))
- )
-def test_list_tables_rest_flattened():
- client = BigtableTableAdminClient(
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.GetSchemaBundleRequest,
+ dict,
+ ],
+)
+def test_get_schema_bundle(request_type, transport: str = "grpc"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ transport=transport,
)
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(type(client.transport._session), "request") as req:
- # Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListTablesResponse()
-
- # get arguments that satisfy an http rule for this method
- sample_request = {"parent": "projects/sample1/instances/sample2"}
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
- # get truthy value for each flattened field
- mock_args = dict(
- parent="parent_value",
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = table.SchemaBundle(
+ name="name_value",
+ etag="etag_value",
)
- mock_args.update(sample_request)
-
- # Wrap the value into a proper Response obj
- response_value = Response()
- response_value.status_code = 200
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(return_value)
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_schema_bundle(request)
- client.list_tables(**mock_args)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = bigtable_table_admin.GetSchemaBundleRequest()
+ assert args[0] == request
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(req.mock_calls) == 1
- _, args, _ = req.mock_calls[0]
- assert path_template.validate(
- "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host,
- args[1],
- )
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, table.SchemaBundle)
+ assert response.name == "name_value"
+ assert response.etag == "etag_value"
-def test_list_tables_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_get_schema_bundle_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
+ transport="grpc",
)
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_tables(
- bigtable_table_admin.ListTablesRequest(),
- parent="parent_value",
- )
-
-
-def test_list_tables_rest_pager(transport: str = "rest"):
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = bigtable_table_admin.GetSchemaBundleRequest(
+ name="name_value",
)
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # TODO(kbandes): remove this mock unless there's a good reason for it.
- # with mock.patch.object(path_template, 'transcode') as transcode:
- # Set the response as a series of pages
- response = (
- bigtable_table_admin.ListTablesResponse(
- tables=[
- table.Table(),
- table.Table(),
- table.Table(),
- ],
- next_page_token="abc",
- ),
- bigtable_table_admin.ListTablesResponse(
- tables=[],
- next_page_token="def",
- ),
- bigtable_table_admin.ListTablesResponse(
- tables=[
- table.Table(),
- ],
- next_page_token="ghi",
- ),
- bigtable_table_admin.ListTablesResponse(
- tables=[
- table.Table(),
- table.Table(),
- ],
- ),
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_schema_bundle), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
)
- # Two responses for two calls
- response = response + response
-
- # Wrap the values into proper Response objs
- response = tuple(
- bigtable_table_admin.ListTablesResponse.to_json(x) for x in response
+ client.get_schema_bundle(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_table_admin.GetSchemaBundleRequest(
+ name="name_value",
)
- return_values = tuple(Response() for i in response)
- for return_val, response_val in zip(return_values, response):
- return_val._content = response_val.encode("UTF-8")
- return_val.status_code = 200
- req.side_effect = return_values
-
- sample_request = {"parent": "projects/sample1/instances/sample2"}
-
- pager = client.list_tables(request=sample_request)
-
- results = list(pager)
- assert len(results) == 6
- assert all(isinstance(i, table.Table) for i in results)
-
- pages = list(client.list_tables(request=sample_request).pages)
- for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
- assert page_.raw_page.next_page_token == token
-def test_get_table_rest_use_cached_wrapped_rpc():
+def test_get_schema_bundle_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ transport="grpc",
)
# Should wrap all calls on client creation
@@ -12707,375 +13061,381 @@ def test_get_table_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.get_table in client._transport._wrapped_methods
+ assert client._transport.get_schema_bundle in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.get_table] = mock_rpc
-
+ client._transport._wrapped_methods[
+ client._transport.get_schema_bundle
+ ] = mock_rpc
request = {}
- client.get_table(request)
+ client.get_schema_bundle(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.get_table(request)
+ client.get_schema_bundle(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_get_table_rest_required_fields(
- request_type=bigtable_table_admin.GetTableRequest,
+@pytest.mark.asyncio
+async def test_get_schema_bundle_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
):
- transport_class = transports.BigtableTableAdminRestTransport
-
- request_init = {}
- request_init["name"] = ""
- request = request_type(**request_init)
- pb_request = request_type.pb(request)
- jsonified_request = json.loads(
- json_format.MessageToJson(pb_request, use_integers_for_enums=False)
- )
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
- # verify fields with default values are dropped
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).get_table._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
+ # Ensure method has been cached
+ assert (
+ client._client._transport.get_schema_bundle
+ in client._client._transport._wrapped_methods
+ )
- # verify required fields with default values are now present
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.get_schema_bundle
+ ] = mock_rpc
- jsonified_request["name"] = "name_value"
+ request = {}
+ await client.get_schema_bundle(request)
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).get_table._get_unset_required_fields(jsonified_request)
- # Check that path parameters and body parameters are not mixing in.
- assert not set(unset_fields) - set(("view",))
- jsonified_request.update(unset_fields)
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
- # verify required fields with non-default values are left alone
- assert "name" in jsonified_request
- assert jsonified_request["name"] == "name_value"
+ await client.get_schema_bundle(request)
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_get_schema_bundle_async(
+ transport: str = "grpc_asyncio",
+ request_type=bigtable_table_admin.GetSchemaBundleRequest,
+):
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
)
- request = request_type(**request_init)
- # Designate an appropriate value for the returned response.
- return_value = table.Table()
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # We need to mock transcode() because providing default values
- # for required fields will fail the real version if the http_options
- # expect actual values for those fields.
- with mock.patch.object(path_template, "transcode") as transcode:
- # A uri without fields and an empty body will force all the
- # request fields to show up in the query_params.
- pb_request = request_type.pb(request)
- transcode_result = {
- "uri": "v1/sample_method",
- "method": "get",
- "query_params": pb_request,
- }
- transcode.return_value = transcode_result
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
- response_value = Response()
- response_value.status_code = 200
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ table.SchemaBundle(
+ name="name_value",
+ etag="etag_value",
+ )
+ )
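+        # FakeUnaryUnaryCall stands in for the grpc.aio call object, so awaiting
+        # the mocked stub yields the SchemaBundle designated above.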
+ response = await client.get_schema_bundle(request)
- # Convert return value to protobuf type
- return_value = table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(return_value)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = bigtable_table_admin.GetSchemaBundleRequest()
+ assert args[0] == request
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, table.SchemaBundle)
+ assert response.name == "name_value"
+ assert response.etag == "etag_value"
- response = client.get_table(request)
- expected_params = [("$alt", "json;enum-encoding=int")]
- actual_params = req.call_args.kwargs["params"]
- assert expected_params == actual_params
+@pytest.mark.asyncio
+async def test_get_schema_bundle_async_from_dict():
+ await test_get_schema_bundle_async(request_type=dict)
-def test_get_table_rest_unset_required_fields():
- transport = transports.BigtableTableAdminRestTransport(
- credentials=ga_credentials.AnonymousCredentials
+def test_get_schema_bundle_field_headers():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
)
- unset_fields = transport.get_table._get_unset_required_fields({})
- assert set(unset_fields) == (set(("view",)) & set(("name",)))
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.GetSchemaBundleRequest()
+ request.name = "name_value"
-def test_get_table_rest_flattened():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = table.SchemaBundle()
+ client.get_schema_bundle(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_schema_bundle_field_headers_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
)
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(type(client.transport._session), "request") as req:
- # Designate an appropriate value for the returned response.
- return_value = table.Table()
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.GetSchemaBundleRequest()
- # get arguments that satisfy an http rule for this method
- sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request.name = "name_value"
- # get truthy value for each flattened field
- mock_args = dict(
- name="name_value",
- )
- mock_args.update(sample_request)
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.SchemaBundle())
+ await client.get_schema_bundle(request)
- # Wrap the value into a proper Response obj
- response_value = Response()
- response_value.status_code = 200
- # Convert return value to protobuf type
- return_value = table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(return_value)
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
- client.get_table(**mock_args)
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_schema_bundle_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = table.SchemaBundle()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_schema_bundle(
+ name="name_value",
+ )
# Establish that the underlying call was made with the expected
# request object values.
- assert len(req.mock_calls) == 1
- _, args, _ = req.mock_calls[0]
- assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host,
- args[1],
- )
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
-def test_get_table_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_get_schema_bundle_flattened_error():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.get_table(
- bigtable_table_admin.GetTableRequest(),
+ client.get_schema_bundle(
+ bigtable_table_admin.GetSchemaBundleRequest(),
name="name_value",
)
-def test_update_table_rest_use_cached_wrapped_rpc():
- # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
- # instead of constructing them on each call
- with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
- )
-
- # Should wrap all calls on client creation
- assert wrapper_fn.call_count > 0
- wrapper_fn.reset_mock()
+@pytest.mark.asyncio
+async def test_get_schema_bundle_flattened_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
- # Ensure method has been cached
- assert client._transport.update_table in client._transport._wrapped_methods
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
- # Replace cached wrapped function with mock
- mock_rpc = mock.Mock()
- mock_rpc.return_value.name = (
- "foo" # operation_request.operation in compute client(s) expect a string.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.SchemaBundle())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_schema_bundle(
+ name="name_value",
)
- client._transport._wrapped_methods[client._transport.update_table] = mock_rpc
-
- request = {}
- client.update_table(request)
- # Establish that the underlying gRPC stub method was called.
- assert mock_rpc.call_count == 1
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
- # Operation methods build a cached wrapper on first rpc call
- # subsequent calls should use the cached wrapper
- wrapper_fn.reset_mock()
- client.update_table(request)
+@pytest.mark.asyncio
+async def test_get_schema_bundle_flattened_error_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
- # Establish that a new wrapper was not created for this call
- assert wrapper_fn.call_count == 0
- assert mock_rpc.call_count == 2
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_schema_bundle(
+ bigtable_table_admin.GetSchemaBundleRequest(),
+ name="name_value",
+ )
-def test_update_table_rest_required_fields(
- request_type=bigtable_table_admin.UpdateTableRequest,
-):
- transport_class = transports.BigtableTableAdminRestTransport
-
- request_init = {}
- request = request_type(**request_init)
- pb_request = request_type.pb(request)
- jsonified_request = json.loads(
- json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.ListSchemaBundlesRequest,
+ dict,
+ ],
+)
+def test_list_schema_bundles(request_type, transport: str = "grpc"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
)
- # verify fields with default values are dropped
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).update_table._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = bigtable_table_admin.ListSchemaBundlesResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_schema_bundles(request)
- # verify required fields with default values are now present
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = bigtable_table_admin.ListSchemaBundlesRequest()
+ assert args[0] == request
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).update_table._get_unset_required_fields(jsonified_request)
- # Check that path parameters and body parameters are not mixing in.
- assert not set(unset_fields) - set(
- (
- "ignore_warnings",
- "update_mask",
- )
- )
- jsonified_request.update(unset_fields)
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListSchemaBundlesPager)
+ assert response.next_page_token == "next_page_token_value"
- # verify required fields with non-default values are left alone
- client = BigtableTableAdminClient(
+def test_list_schema_bundles_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ transport="grpc",
)
- request = request_type(**request_init)
-
- # Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # We need to mock transcode() because providing default values
- # for required fields will fail the real version if the http_options
- # expect actual values for those fields.
- with mock.patch.object(path_template, "transcode") as transcode:
- # A uri without fields and an empty body will force all the
- # request fields to show up in the query_params.
- pb_request = request_type.pb(request)
- transcode_result = {
- "uri": "v1/sample_method",
- "method": "patch",
- "query_params": pb_request,
- }
- transcode_result["body"] = pb_request
- transcode.return_value = transcode_result
-
- response_value = Response()
- response_value.status_code = 200
- json_return_value = json_format.MessageToJson(return_value)
-
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
-
- response = client.update_table(request)
-
- expected_params = [("$alt", "json;enum-encoding=int")]
- actual_params = req.call_args.kwargs["params"]
- assert expected_params == actual_params
-
-def test_update_table_rest_unset_required_fields():
- transport = transports.BigtableTableAdminRestTransport(
- credentials=ga_credentials.AnonymousCredentials
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = bigtable_table_admin.ListSchemaBundlesRequest(
+ parent="parent_value",
+ page_token="page_token_value",
)
- unset_fields = transport.update_table._get_unset_required_fields({})
- assert set(unset_fields) == (
- set(
- (
- "ignoreWarnings",
- "updateMask",
- )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
)
- & set(
- (
- "table",
- "updateMask",
- )
+ client.list_schema_bundles(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_table_admin.ListSchemaBundlesRequest(
+ parent="parent_value",
+ page_token="page_token_value",
)
- )
-
-
-def test_update_table_rest_flattened():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
- )
-
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(type(client.transport._session), "request") as req:
- # Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
- # get arguments that satisfy an http rule for this method
- sample_request = {
- "table": {"name": "projects/sample1/instances/sample2/tables/sample3"}
- }
- # get truthy value for each flattened field
- mock_args = dict(
- table=gba_table.Table(name="name_value"),
- update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+def test_list_schema_bundles_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
)
- mock_args.update(sample_request)
- # Wrap the value into a proper Response obj
- response_value = Response()
- response_value.status_code = 200
- json_return_value = json_format.MessageToJson(return_value)
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
- client.update_table(**mock_args)
+ # Ensure method has been cached
+ assert (
+ client._transport.list_schema_bundles in client._transport._wrapped_methods
+ )
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(req.mock_calls) == 1
- _, args, _ = req.mock_calls[0]
- assert path_template.validate(
- "%s/v2/{table.name=projects/*/instances/*/tables/*}"
- % client.transport._host,
- args[1],
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
)
+ client._transport._wrapped_methods[
+ client._transport.list_schema_bundles
+ ] = mock_rpc
+ request = {}
+ client.list_schema_bundles(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
-def test_update_table_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
+ client.list_schema_bundles(request)
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.update_table(
- bigtable_table_admin.UpdateTableRequest(),
- table=gba_table.Table(name="name_value"),
- update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
- )
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
-def test_delete_table_rest_use_cached_wrapped_rpc():
+@pytest.mark.asyncio
+async def test_list_schema_bundles_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
+):
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
- with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
)
# Should wrap all calls on client creation
@@ -13083,349 +13443,500 @@ def test_delete_table_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.delete_table in client._transport._wrapped_methods
+ assert (
+ client._client._transport.list_schema_bundles
+ in client._client._transport._wrapped_methods
+ )
# Replace cached wrapped function with mock
- mock_rpc = mock.Mock()
- mock_rpc.return_value.name = (
- "foo" # operation_request.operation in compute client(s) expect a string.
- )
- client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.list_schema_bundles
+ ] = mock_rpc
request = {}
- client.delete_table(request)
+ await client.list_schema_bundles(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.delete_table(request)
+ await client.list_schema_bundles(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_delete_table_rest_required_fields(
- request_type=bigtable_table_admin.DeleteTableRequest,
+@pytest.mark.asyncio
+async def test_list_schema_bundles_async(
+ transport: str = "grpc_asyncio",
+ request_type=bigtable_table_admin.ListSchemaBundlesRequest,
):
- transport_class = transports.BigtableTableAdminRestTransport
-
- request_init = {}
- request_init["name"] = ""
- request = request_type(**request_init)
- pb_request = request_type.pb(request)
- jsonified_request = json.loads(
- json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
)
- # verify fields with default values are dropped
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).delete_table._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ response = await client.list_schema_bundles(request)
- # verify required fields with default values are now present
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = bigtable_table_admin.ListSchemaBundlesRequest()
+ assert args[0] == request
- jsonified_request["name"] = "name_value"
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListSchemaBundlesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).delete_table._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
- # verify required fields with non-default values are left alone
- assert "name" in jsonified_request
- assert jsonified_request["name"] == "name_value"
+@pytest.mark.asyncio
+async def test_list_schema_bundles_async_from_dict():
+ await test_list_schema_bundles_async(request_type=dict)
- client = BigtableTableAdminClient(
+
+def test_list_schema_bundles_field_headers():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
)
- request = request_type(**request_init)
- # Designate an appropriate value for the returned response.
- return_value = None
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # We need to mock transcode() because providing default values
- # for required fields will fail the real version if the http_options
- # expect actual values for those fields.
- with mock.patch.object(path_template, "transcode") as transcode:
- # A uri without fields and an empty body will force all the
- # request fields to show up in the query_params.
- pb_request = request_type.pb(request)
- transcode_result = {
- "uri": "v1/sample_method",
- "method": "delete",
- "query_params": pb_request,
- }
- transcode.return_value = transcode_result
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.ListSchemaBundlesRequest()
- response_value = Response()
- response_value.status_code = 200
- json_return_value = ""
+ request.parent = "parent_value"
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ call.return_value = bigtable_table_admin.ListSchemaBundlesResponse()
+ client.list_schema_bundles(request)
- response = client.delete_table(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
- expected_params = [("$alt", "json;enum-encoding=int")]
- actual_params = req.call_args.kwargs["params"]
- assert expected_params == actual_params
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
-def test_delete_table_rest_unset_required_fields():
- transport = transports.BigtableTableAdminRestTransport(
- credentials=ga_credentials.AnonymousCredentials
+@pytest.mark.asyncio
+async def test_list_schema_bundles_field_headers_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
)
- unset_fields = transport.delete_table._get_unset_required_fields({})
- assert set(unset_fields) == (set(()) & set(("name",)))
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.ListSchemaBundlesRequest()
+ request.parent = "parent_value"
-def test_delete_table_rest_flattened():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
- )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_table_admin.ListSchemaBundlesResponse()
+ )
+ await client.list_schema_bundles(request)
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(type(client.transport._session), "request") as req:
- # Designate an appropriate value for the returned response.
- return_value = None
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
- # get arguments that satisfy an http rule for this method
- sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
- # get truthy value for each flattened field
- mock_args = dict(
- name="name_value",
- )
- mock_args.update(sample_request)
- # Wrap the value into a proper Response obj
- response_value = Response()
- response_value.status_code = 200
- json_return_value = ""
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+def test_list_schema_bundles_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
- client.delete_table(**mock_args)
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = bigtable_table_admin.ListSchemaBundlesResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_schema_bundles(
+ parent="parent_value",
+ )
# Establish that the underlying call was made with the expected
# request object values.
- assert len(req.mock_calls) == 1
- _, args, _ = req.mock_calls[0]
- assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host,
- args[1],
- )
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
-def test_delete_table_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_list_schema_bundles_flattened_error():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.delete_table(
- bigtable_table_admin.DeleteTableRequest(),
- name="name_value",
+ client.list_schema_bundles(
+ bigtable_table_admin.ListSchemaBundlesRequest(),
+ parent="parent_value",
)
-def test_undelete_table_rest_use_cached_wrapped_rpc():
- # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
- # instead of constructing them on each call
- with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
- )
-
- # Should wrap all calls on client creation
- assert wrapper_fn.call_count > 0
- wrapper_fn.reset_mock()
+@pytest.mark.asyncio
+async def test_list_schema_bundles_flattened_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
- # Ensure method has been cached
- assert client._transport.undelete_table in client._transport._wrapped_methods
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
- # Replace cached wrapped function with mock
- mock_rpc = mock.Mock()
- mock_rpc.return_value.name = (
- "foo" # operation_request.operation in compute client(s) expect a string.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_table_admin.ListSchemaBundlesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_schema_bundles(
+ parent="parent_value",
)
- client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc
-
- request = {}
- client.undelete_table(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert mock_rpc.call_count == 1
- # Operation methods build a cached wrapper on first rpc call
- # subsequent calls should use the cached wrapper
- wrapper_fn.reset_mock()
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
- client.undelete_table(request)
- # Establish that a new wrapper was not created for this call
- assert wrapper_fn.call_count == 0
- assert mock_rpc.call_count == 2
+@pytest.mark.asyncio
+async def test_list_schema_bundles_flattened_error_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_schema_bundles(
+ bigtable_table_admin.ListSchemaBundlesRequest(),
+ parent="parent_value",
+ )
-def test_undelete_table_rest_required_fields(
- request_type=bigtable_table_admin.UndeleteTableRequest,
-):
- transport_class = transports.BigtableTableAdminRestTransport
- request_init = {}
- request_init["name"] = ""
- request = request_type(**request_init)
- pb_request = request_type.pb(request)
- jsonified_request = json.loads(
- json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+def test_list_schema_bundles_pager(transport_name: str = "grpc"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
)
- # verify fields with default values are dropped
-
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).undelete_table._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ ],
+ next_page_token="abc",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[],
+ next_page_token="def",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ ],
+ next_page_token="ghi",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ ],
+ ),
+ RuntimeError,
+ )
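+        # The trailing RuntimeError makes the mock raise if the pager requests a
+        # page beyond the last response (whose next_page_token is empty); the four
+        # fake pages above hold 3 + 0 + 1 + 2 = 6 schema bundles in total.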
- # verify required fields with default values are now present
+ expected_metadata = ()
+ retry = retries.Retry()
+ timeout = 5
+ expected_metadata = tuple(expected_metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_schema_bundles(request={}, retry=retry, timeout=timeout)
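+        # The pager should carry the routing metadata, retry policy and timeout
+        # forward so that subsequent page requests reuse the same settings.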
- jsonified_request["name"] = "name_value"
+ assert pager._metadata == expected_metadata
+ assert pager._retry == retry
+ assert pager._timeout == timeout
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).undelete_table._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, table.SchemaBundle) for i in results)
- # verify required fields with non-default values are left alone
- assert "name" in jsonified_request
- assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+def test_list_schema_bundles_pages(transport_name: str = "grpc"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ transport=transport_name,
)
- request = request_type(**request_init)
- # Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # We need to mock transcode() because providing default values
- # for required fields will fail the real version if the http_options
- # expect actual values for those fields.
- with mock.patch.object(path_template, "transcode") as transcode:
- # A uri without fields and an empty body will force all the
- # request fields to show up in the query_params.
- pb_request = request_type.pb(request)
- transcode_result = {
- "uri": "v1/sample_method",
- "method": "post",
- "query_params": pb_request,
- }
- transcode_result["body"] = pb_request
- transcode.return_value = transcode_result
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ ],
+ next_page_token="abc",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[],
+ next_page_token="def",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ ],
+ next_page_token="ghi",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ ],
+ ),
+ RuntimeError,
+ )
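+        # Iterating .pages yields one response per fake page; the empty
+        # next_page_token on the final page ends the iteration.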
+ pages = list(client.list_schema_bundles(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
- response_value = Response()
- response_value.status_code = 200
- json_return_value = json_format.MessageToJson(return_value)
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+@pytest.mark.asyncio
+async def test_list_schema_bundles_async_pager():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
- response = client.undelete_table(request)
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ ],
+ next_page_token="abc",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[],
+ next_page_token="def",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ ],
+ next_page_token="ghi",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_schema_bundles(
+ request={},
+ )
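+        # The awaited call above fetches the first page, whose next_page_token
+        # ("abc") is surfaced on the pager.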
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
- expected_params = [("$alt", "json;enum-encoding=int")]
- actual_params = req.call_args.kwargs["params"]
- assert expected_params == actual_params
+ assert len(responses) == 6
+ assert all(isinstance(i, table.SchemaBundle) for i in responses)
-def test_undelete_table_rest_unset_required_fields():
- transport = transports.BigtableTableAdminRestTransport(
- credentials=ga_credentials.AnonymousCredentials
+@pytest.mark.asyncio
+async def test_list_schema_bundles_async_pages():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
)
- unset_fields = transport.undelete_table._get_unset_required_fields({})
- assert set(unset_fields) == (set(()) & set(("name",)))
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ ],
+ next_page_token="abc",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[],
+ next_page_token="def",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ ],
+ next_page_token="ghi",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
+ await client.list_schema_bundles(request={})
+ ).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
-def test_undelete_table_rest_flattened():
- client = BigtableTableAdminClient(
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.DeleteSchemaBundleRequest,
+ dict,
+ ],
+)
+def test_delete_schema_bundle(request_type, transport: str = "grpc"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ transport=transport,
)
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(type(client.transport._session), "request") as req:
- # Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
-
- # get arguments that satisfy an http rule for this method
- sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
-
- # get truthy value for each flattened field
- mock_args = dict(
- name="name_value",
- )
- mock_args.update(sample_request)
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
- # Wrap the value into a proper Response obj
- response_value = Response()
- response_value.status_code = 200
- json_return_value = json_format.MessageToJson(return_value)
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_schema_bundle(request)
- client.undelete_table(**mock_args)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = bigtable_table_admin.DeleteSchemaBundleRequest()
+ assert args[0] == request
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(req.mock_calls) == 1
- _, args, _ = req.mock_calls[0]
- assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/tables/*}:undelete"
- % client.transport._host,
- args[1],
- )
+ # Establish that the response is the type that we expect.
+ assert response is None
-def test_undelete_table_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_delete_schema_bundle_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
+ transport="grpc",
)
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.undelete_table(
- bigtable_table_admin.UndeleteTableRequest(),
+ # Populate all string fields in the request which are not UUID4
+ # since we want to check that UUID4 are populated automatically
+ # if they meet the requirements of AIP 4235.
+ request = bigtable_table_admin.DeleteSchemaBundleRequest(
+ name="name_value",
+ etag="etag_value",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_schema_bundle), "__call__"
+ ) as call:
+ call.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client.delete_schema_bundle(request=request)
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_table_admin.DeleteSchemaBundleRequest(
name="name_value",
+ etag="etag_value",
)
-def test_create_authorized_view_rest_use_cached_wrapped_rpc():
+def test_delete_schema_bundle_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
+ transport="grpc",
)
# Should wrap all calls on client creation
@@ -13434,8 +13945,7 @@ def test_create_authorized_view_rest_use_cached_wrapped_rpc():
# Ensure method has been cached
assert (
- client._transport.create_authorized_view
- in client._transport._wrapped_methods
+ client._transport.delete_schema_bundle in client._transport._wrapped_methods
)
# Replace cached wrapped function with mock
@@ -13444,197 +13954,252 @@ def test_create_authorized_view_rest_use_cached_wrapped_rpc():
"foo" # operation_request.operation in compute client(s) expect a string.
)
client._transport._wrapped_methods[
- client._transport.create_authorized_view
+ client._transport.delete_schema_bundle
] = mock_rpc
-
request = {}
- client.create_authorized_view(request)
+ client.delete_schema_bundle(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- # Operation methods build a cached wrapper on first rpc call
- # subsequent calls should use the cached wrapper
- wrapper_fn.reset_mock()
-
- client.create_authorized_view(request)
+ client.delete_schema_bundle(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_create_authorized_view_rest_required_fields(
- request_type=bigtable_table_admin.CreateAuthorizedViewRequest,
+@pytest.mark.asyncio
+async def test_delete_schema_bundle_async_use_cached_wrapped_rpc(
+ transport: str = "grpc_asyncio",
):
- transport_class = transports.BigtableTableAdminRestTransport
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
- request_init = {}
- request_init["parent"] = ""
- request_init["authorized_view_id"] = ""
- request = request_type(**request_init)
- pb_request = request_type.pb(request)
- jsonified_request = json.loads(
- json_format.MessageToJson(pb_request, use_integers_for_enums=False)
- )
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
- # verify fields with default values are dropped
- assert "authorizedViewId" not in jsonified_request
+ # Ensure method has been cached
+ assert (
+ client._client._transport.delete_schema_bundle
+ in client._client._transport._wrapped_methods
+ )
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).create_authorized_view._get_unset_required_fields(jsonified_request)
- jsonified_request.update(unset_fields)
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.AsyncMock()
+ mock_rpc.return_value = mock.Mock()
+ client._client._transport._wrapped_methods[
+ client._client._transport.delete_schema_bundle
+ ] = mock_rpc
- # verify required fields with default values are now present
- assert "authorizedViewId" in jsonified_request
- assert jsonified_request["authorizedViewId"] == request_init["authorized_view_id"]
+ request = {}
+ await client.delete_schema_bundle(request)
- jsonified_request["parent"] = "parent_value"
- jsonified_request["authorizedViewId"] = "authorized_view_id_value"
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
- unset_fields = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- ).create_authorized_view._get_unset_required_fields(jsonified_request)
- # Check that path parameters and body parameters are not mixing in.
- assert not set(unset_fields) - set(("authorized_view_id",))
- jsonified_request.update(unset_fields)
+ await client.delete_schema_bundle(request)
- # verify required fields with non-default values are left alone
- assert "parent" in jsonified_request
- assert jsonified_request["parent"] == "parent_value"
- assert "authorizedViewId" in jsonified_request
- assert jsonified_request["authorizedViewId"] == "authorized_view_id_value"
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
- )
- request = request_type(**request_init)
- # Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # We need to mock transcode() because providing default values
- # for required fields will fail the real version if the http_options
- # expect actual values for those fields.
- with mock.patch.object(path_template, "transcode") as transcode:
- # A uri without fields and an empty body will force all the
- # request fields to show up in the query_params.
- pb_request = request_type.pb(request)
- transcode_result = {
- "uri": "v1/sample_method",
- "method": "post",
- "query_params": pb_request,
- }
- transcode_result["body"] = pb_request
- transcode.return_value = transcode_result
+@pytest.mark.asyncio
+async def test_delete_schema_bundle_async(
+ transport: str = "grpc_asyncio",
+ request_type=bigtable_table_admin.DeleteSchemaBundleRequest,
+):
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport=transport,
+ )
- response_value = Response()
- response_value.status_code = 200
- json_return_value = json_format.MessageToJson(return_value)
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_schema_bundle(request)
- response = client.create_authorized_view(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ request = bigtable_table_admin.DeleteSchemaBundleRequest()
+ assert args[0] == request
- expected_params = [
- (
- "authorizedViewId",
- "",
- ),
- ("$alt", "json;enum-encoding=int"),
- ]
- actual_params = req.call_args.kwargs["params"]
- assert expected_params == actual_params
+ # Establish that the response is the type that we expect.
+ assert response is None
-def test_create_authorized_view_rest_unset_required_fields():
- transport = transports.BigtableTableAdminRestTransport(
- credentials=ga_credentials.AnonymousCredentials
+@pytest.mark.asyncio
+async def test_delete_schema_bundle_async_from_dict():
+ await test_delete_schema_bundle_async(request_type=dict)
+
+
+def test_delete_schema_bundle_field_headers():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
)
- unset_fields = transport.create_authorized_view._get_unset_required_fields({})
- assert set(unset_fields) == (
- set(("authorizedViewId",))
- & set(
- (
- "parent",
- "authorizedViewId",
- "authorizedView",
- )
- )
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.DeleteSchemaBundleRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = None
+ client.delete_schema_bundle(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_schema_bundle_field_headers_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
)
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.DeleteSchemaBundleRequest()
-def test_create_authorized_view_rest_flattened():
- client = BigtableTableAdminClient(
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_schema_bundle(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
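As an illustrative aside for the two field-header tests above: the client is expected to derive an x-goog-request-params entry from URI path fields. A minimal sketch under that assumption (FakeRequest and build_routing_metadata are hypothetical names, not the client's internals):

from dataclasses import dataclass

@dataclass
class FakeRequest:
    # Hypothetical stand-in for a request message with a URI path field.
    name: str

def build_routing_metadata(request: FakeRequest) -> list[tuple[str, str]]:
    # Mirror the "x-goog-request-params" convention asserted in the tests above.
    return [("x-goog-request-params", f"name={request.name}")]

assert ("x-goog-request-params", "name=name_value") in build_routing_metadata(
    FakeRequest(name="name_value")
)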
+def test_delete_schema_bundle_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
)
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(type(client.transport._session), "request") as req:
- # Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_schema_bundle(
+ name="name_value",
+ )
- # get arguments that satisfy an http rule for this method
- sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
- # get truthy value for each flattened field
- mock_args = dict(
- parent="parent_value",
- authorized_view=table.AuthorizedView(name="name_value"),
- authorized_view_id="authorized_view_id_value",
+
+def test_delete_schema_bundle_flattened_error():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_schema_bundle(
+ bigtable_table_admin.DeleteSchemaBundleRequest(),
+ name="name_value",
)
- mock_args.update(sample_request)
- # Wrap the value into a proper Response obj
- response_value = Response()
- response_value.status_code = 200
- json_return_value = json_format.MessageToJson(return_value)
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.create_authorized_view(**mock_args)
+@pytest.mark.asyncio
+async def test_delete_schema_bundle_flattened_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_schema_bundle(
+ name="name_value",
+ )
# Establish that the underlying call was made with the expected
# request object values.
- assert len(req.mock_calls) == 1
- _, args, _ = req.mock_calls[0]
- assert path_template.validate(
- "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews"
- % client.transport._host,
- args[1],
- )
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
-def test_create_authorized_view_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
+@pytest.mark.asyncio
+async def test_delete_schema_bundle_flattened_error_async():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.create_authorized_view(
- bigtable_table_admin.CreateAuthorizedViewRequest(),
- parent="parent_value",
- authorized_view=table.AuthorizedView(name="name_value"),
- authorized_view_id="authorized_view_id_value",
+ await client.delete_schema_bundle(
+ bigtable_table_admin.DeleteSchemaBundleRequest(),
+ name="name_value",
)
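The flattened/flattened_error pairs above exercise the calling convention where keyword arguments populate the request message and mixing them with an explicit request object raises ValueError. A hedged sketch of that contract (the request class and method below are illustrative, not the generated client):

from dataclasses import dataclass

@dataclass
class FakeDeleteRequest:
    # Hypothetical stand-in for the proto request message.
    name: str = ""

def delete_schema_bundle(request=None, *, name=None):
    # Mixing a request object with flattened keyword fields is rejected.
    if request is not None and name is not None:
        raise ValueError("Cannot pass both a request object and flattened fields.")
    return request if request is not None else FakeDeleteRequest(name=name)

assert delete_schema_bundle(name="name_value").name == "name_value"
try:
    delete_schema_bundle(FakeDeleteRequest(name="x"), name="y")
except ValueError:
    pass  # expected: the real clients raise the same way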
-def test_list_authorized_views_rest_use_cached_wrapped_rpc():
+def test_create_table_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -13644,40 +14209,36 @@ def test_list_authorized_views_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert (
- client._transport.list_authorized_views
- in client._transport._wrapped_methods
- )
+ assert client._transport.create_table in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[
- client._transport.list_authorized_views
- ] = mock_rpc
+ client._transport._wrapped_methods[client._transport.create_table] = mock_rpc
request = {}
- client.list_authorized_views(request)
+ client.create_table(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.list_authorized_views(request)
+ client.create_table(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
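The *_use_cached_wrapped_rpc tests above check that a wrapper is built once per method and then reused from a cache. A small sketch of the caching pattern being asserted, with hypothetical names rather than the transport's real internals:

class FakeTransport:
    """Hypothetical transport with a wrapped-method cache."""

    def __init__(self):
        self.wrap_calls = 0
        self._wrapped_methods = {}

    def _wrap(self, fn):
        self.wrap_calls += 1
        return fn

    def _prep_wrapped_messages(self):
        # Build the wrapper once, at construction time.
        self._wrapped_methods[self.create_table] = self._wrap(self.create_table)

    def create_table(self, request):
        return "table"

transport = FakeTransport()
transport._prep_wrapped_messages()
transport._wrapped_methods[transport.create_table]({})
transport._wrapped_methods[transport.create_table]({})
assert transport.wrap_calls == 1  # no new wrapper per call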
-def test_list_authorized_views_rest_required_fields(
- request_type=bigtable_table_admin.ListAuthorizedViewsRequest,
+def test_create_table_rest_required_fields(
+ request_type=bigtable_table_admin.CreateTableRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
request_init["parent"] = ""
+ request_init["table_id"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -13688,38 +14249,33 @@ def test_list_authorized_views_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).list_authorized_views._get_unset_required_fields(jsonified_request)
+ ).create_table._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["parent"] = "parent_value"
+ jsonified_request["tableId"] = "table_id_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).list_authorized_views._get_unset_required_fields(jsonified_request)
- # Check that path parameters and body parameters are not mixing in.
- assert not set(unset_fields) - set(
- (
- "page_size",
- "page_token",
- "view",
- )
- )
+ ).create_table._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "parent" in jsonified_request
assert jsonified_request["parent"] == "parent_value"
+ assert "tableId" in jsonified_request
+ assert jsonified_request["tableId"] == "table_id_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListAuthorizedViewsResponse()
+ return_value = gba_table.Table()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -13731,51 +14287,50 @@ def test_list_authorized_views_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "get",
+ "method": "post",
"query_params": pb_request,
}
+ transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
# Convert return value to protobuf type
- return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(
- return_value
- )
+ return_value = gba_table.Table.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.list_authorized_views(request)
+ response = client.create_table(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
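The required-fields tests mock transcode() so the request does not need real default values; the dict they return stands in for the HTTP description of the call. A hedged sketch of that shape (fake_transcode is a hypothetical helper, not the library's transcoder):

def fake_transcode(request_fields: dict) -> dict:
    # A proto request becomes an HTTP verb, URI, query params and, for
    # body-carrying methods such as POST, a body echoing the request.
    result = {
        "uri": "v1/sample_method",
        "method": "post",
        "query_params": request_fields,
    }
    result["body"] = request_fields
    return result

call = fake_transcode({"parent": "parent_value", "tableId": "table_id_value"})
assert call["method"] == "post" and "body" in call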
-def test_list_authorized_views_rest_unset_required_fields():
+def test_create_table_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.list_authorized_views._get_unset_required_fields({})
+ unset_fields = transport.create_table._get_unset_required_fields({})
assert set(unset_fields) == (
- set(
+ set(())
+ & set(
(
- "pageSize",
- "pageToken",
- "view",
- )
- )
- & set(("parent",))
+ "parent",
+ "tableId",
+ "table",
+ )
+ )
)
-def test_list_authorized_views_rest_flattened():
- client = BigtableTableAdminClient(
+def test_create_table_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -13783,14 +14338,16 @@ def test_list_authorized_views_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListAuthorizedViewsResponse()
+ return_value = gba_table.Table()
# get arguments that satisfy an http rule for this method
- sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
+ sample_request = {"parent": "projects/sample1/instances/sample2"}
# get truthy value for each flattened field
mock_args = dict(
parent="parent_value",
+ table_id="table_id_value",
+ table=gba_table.Table(name="name_value"),
)
mock_args.update(sample_request)
@@ -13798,27 +14355,26 @@ def test_list_authorized_views_rest_flattened():
response_value = Response()
response_value.status_code = 200
# Convert return value to protobuf type
- return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value)
+ return_value = gba_table.Table.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.list_authorized_views(**mock_args)
+ client.create_table(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews"
- % client.transport._host,
+ "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host,
args[1],
)
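The flattened REST tests assert the outgoing URL with path_template.validate, which matches a concrete path against a URI template. A small usage sketch (the example host and paths below are made up):

from google.api_core import path_template

# validate() returns True when the wildcards in the template are satisfied.
assert path_template.validate(
    "https://example.test/v2/{parent=projects/*/instances/*}/tables",
    "https://example.test/v2/projects/sample1/instances/sample2/tables",
)
# A path missing the instances segment does not match the template.
assert not path_template.validate(
    "https://example.test/v2/{parent=projects/*/instances/*}/tables",
    "https://example.test/v2/projects/sample1/tables",
)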
-def test_list_authorized_views_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_create_table_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -13826,81 +14382,19 @@ def test_list_authorized_views_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.list_authorized_views(
- bigtable_table_admin.ListAuthorizedViewsRequest(),
+ client.create_table(
+ bigtable_table_admin.CreateTableRequest(),
parent="parent_value",
+ table_id="table_id_value",
+ table=gba_table.Table(name="name_value"),
)
-def test_list_authorized_views_rest_pager(transport: str = "rest"):
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # TODO(kbandes): remove this mock unless there's a good reason for it.
- # with mock.patch.object(path_template, 'transcode') as transcode:
- # Set the response as a series of pages
- response = (
- bigtable_table_admin.ListAuthorizedViewsResponse(
- authorized_views=[
- table.AuthorizedView(),
- table.AuthorizedView(),
- table.AuthorizedView(),
- ],
- next_page_token="abc",
- ),
- bigtable_table_admin.ListAuthorizedViewsResponse(
- authorized_views=[],
- next_page_token="def",
- ),
- bigtable_table_admin.ListAuthorizedViewsResponse(
- authorized_views=[
- table.AuthorizedView(),
- ],
- next_page_token="ghi",
- ),
- bigtable_table_admin.ListAuthorizedViewsResponse(
- authorized_views=[
- table.AuthorizedView(),
- table.AuthorizedView(),
- ],
- ),
- )
- # Two responses for two calls
- response = response + response
-
- # Wrap the values into proper Response objs
- response = tuple(
- bigtable_table_admin.ListAuthorizedViewsResponse.to_json(x)
- for x in response
- )
- return_values = tuple(Response() for i in response)
- for return_val, response_val in zip(return_values, response):
- return_val._content = response_val.encode("UTF-8")
- return_val.status_code = 200
- req.side_effect = return_values
-
- sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
-
- pager = client.list_authorized_views(request=sample_request)
-
- results = list(pager)
- assert len(results) == 6
- assert all(isinstance(i, table.AuthorizedView) for i in results)
-
- pages = list(client.list_authorized_views(request=sample_request).pages)
- for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
- assert page_.raw_page.next_page_token == token
-
-
-def test_get_authorized_view_rest_use_cached_wrapped_rpc():
+def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -13911,7 +14405,8 @@ def test_get_authorized_view_rest_use_cached_wrapped_rpc():
# Ensure method has been cached
assert (
- client._transport.get_authorized_view in client._transport._wrapped_methods
+ client._transport.create_table_from_snapshot
+ in client._transport._wrapped_methods
)
# Replace cached wrapped function with mock
@@ -13920,29 +14415,35 @@ def test_get_authorized_view_rest_use_cached_wrapped_rpc():
"foo" # operation_request.operation in compute client(s) expect a string.
)
client._transport._wrapped_methods[
- client._transport.get_authorized_view
+ client._transport.create_table_from_snapshot
] = mock_rpc
request = {}
- client.get_authorized_view(request)
+ client.create_table_from_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.get_authorized_view(request)
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_table_from_snapshot(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_get_authorized_view_rest_required_fields(
- request_type=bigtable_table_admin.GetAuthorizedViewRequest,
+def test_create_table_from_snapshot_rest_required_fields(
+ request_type=bigtable_table_admin.CreateTableFromSnapshotRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
- request_init["name"] = ""
+ request_init["parent"] = ""
+ request_init["table_id"] = ""
+ request_init["source_snapshot"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -13953,32 +14454,36 @@ def test_get_authorized_view_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).get_authorized_view._get_unset_required_fields(jsonified_request)
+ ).create_table_from_snapshot._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
- jsonified_request["name"] = "name_value"
+ jsonified_request["parent"] = "parent_value"
+ jsonified_request["tableId"] = "table_id_value"
+ jsonified_request["sourceSnapshot"] = "source_snapshot_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).get_authorized_view._get_unset_required_fields(jsonified_request)
- # Check that path parameters and body parameters are not mixing in.
- assert not set(unset_fields) - set(("view",))
+ ).create_table_from_snapshot._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
- assert "name" in jsonified_request
- assert jsonified_request["name"] == "name_value"
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+ assert "tableId" in jsonified_request
+ assert jsonified_request["tableId"] == "table_id_value"
+ assert "sourceSnapshot" in jsonified_request
+ assert jsonified_request["sourceSnapshot"] == "source_snapshot_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = table.AuthorizedView()
+ return_value = operations_pb2.Operation(name="operations/spam")
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -13990,40 +14495,47 @@ def test_get_authorized_view_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "get",
+ "method": "post",
"query_params": pb_request,
}
+ transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = table.AuthorizedView.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.get_authorized_view(request)
+ response = client.create_table_from_snapshot(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_get_authorized_view_rest_unset_required_fields():
+def test_create_table_from_snapshot_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.get_authorized_view._get_unset_required_fields({})
- assert set(unset_fields) == (set(("view",)) & set(("name",)))
+ unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "tableId",
+ "sourceSnapshot",
+ )
+ )
+ )
-def test_get_authorized_view_rest_flattened():
- client = BigtableTableAdminClient(
+def test_create_table_from_snapshot_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14031,44 +14543,42 @@ def test_get_authorized_view_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = table.AuthorizedView()
+ return_value = operations_pb2.Operation(name="operations/spam")
# get arguments that satisfy an http rule for this method
- sample_request = {
- "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
- }
+ sample_request = {"parent": "projects/sample1/instances/sample2"}
# get truthy value for each flattened field
mock_args = dict(
- name="name_value",
+ parent="parent_value",
+ table_id="table_id_value",
+ source_snapshot="source_snapshot_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- # Convert return value to protobuf type
- return_value = table.AuthorizedView.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.get_authorized_view(**mock_args)
+ client.create_table_from_snapshot(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}"
+ "%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot"
% client.transport._host,
args[1],
)
-def test_get_authorized_view_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -14076,17 +14586,19 @@ def test_get_authorized_view_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.get_authorized_view(
- bigtable_table_admin.GetAuthorizedViewRequest(),
- name="name_value",
+ client.create_table_from_snapshot(
+ bigtable_table_admin.CreateTableFromSnapshotRequest(),
+ parent="parent_value",
+ table_id="table_id_value",
+ source_snapshot="source_snapshot_value",
)
-def test_update_authorized_view_rest_use_cached_wrapped_rpc():
+def test_list_tables_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14096,43 +14608,35 @@ def test_update_authorized_view_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert (
- client._transport.update_authorized_view
- in client._transport._wrapped_methods
- )
+ assert client._transport.list_tables in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[
- client._transport.update_authorized_view
- ] = mock_rpc
+ client._transport._wrapped_methods[client._transport.list_tables] = mock_rpc
request = {}
- client.update_authorized_view(request)
+ client.list_tables(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- # Operation methods build a cached wrapper on first rpc call
- # subsequent calls should use the cached wrapper
- wrapper_fn.reset_mock()
-
- client.update_authorized_view(request)
+ client.list_tables(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_update_authorized_view_rest_required_fields(
- request_type=bigtable_table_admin.UpdateAuthorizedViewRequest,
+def test_list_tables_rest_required_fields(
+ request_type=bigtable_table_admin.ListTablesRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
+ request_init["parent"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -14143,33 +14647,38 @@ def test_update_authorized_view_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).update_authorized_view._get_unset_required_fields(jsonified_request)
+ ).list_tables._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
+ jsonified_request["parent"] = "parent_value"
+
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).update_authorized_view._get_unset_required_fields(jsonified_request)
+ ).list_tables._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(
(
- "ignore_warnings",
- "update_mask",
+ "page_size",
+ "page_token",
+ "view",
)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = bigtable_table_admin.ListTablesResponse()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -14181,46 +14690,49 @@ def test_update_authorized_view_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "patch",
+ "method": "get",
"query_params": pb_request,
}
- transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.update_authorized_view(request)
+ response = client.list_tables(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_update_authorized_view_rest_unset_required_fields():
+def test_list_tables_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.update_authorized_view._get_unset_required_fields({})
+ unset_fields = transport.list_tables._get_unset_required_fields({})
assert set(unset_fields) == (
set(
(
- "ignoreWarnings",
- "updateMask",
+ "pageSize",
+ "pageToken",
+ "view",
)
)
- & set(("authorizedView",))
+ & set(("parent",))
)
-def test_update_authorized_view_rest_flattened():
- client = BigtableTableAdminClient(
+def test_list_tables_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14228,45 +14740,41 @@ def test_update_authorized_view_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = bigtable_table_admin.ListTablesResponse()
# get arguments that satisfy an http rule for this method
- sample_request = {
- "authorized_view": {
- "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
- }
- }
+ sample_request = {"parent": "projects/sample1/instances/sample2"}
# get truthy value for each flattened field
mock_args = dict(
- authorized_view=table.AuthorizedView(name="name_value"),
- update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ parent="parent_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.update_authorized_view(**mock_args)
+ client.list_tables(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}"
- % client.transport._host,
+ "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host,
args[1],
)
-def test_update_authorized_view_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_list_tables_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -14274,18 +14782,80 @@ def test_update_authorized_view_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.update_authorized_view(
- bigtable_table_admin.UpdateAuthorizedViewRequest(),
- authorized_view=table.AuthorizedView(name="name_value"),
- update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ client.list_tables(
+ bigtable_table_admin.ListTablesRequest(),
+ parent="parent_value",
)
-def test_delete_authorized_view_rest_use_cached_wrapped_rpc():
+def test_list_tables_rest_pager(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ bigtable_table_admin.ListTablesResponse(
+ tables=[
+ table.Table(),
+ table.Table(),
+ table.Table(),
+ ],
+ next_page_token="abc",
+ ),
+ bigtable_table_admin.ListTablesResponse(
+ tables=[],
+ next_page_token="def",
+ ),
+ bigtable_table_admin.ListTablesResponse(
+ tables=[
+ table.Table(),
+ ],
+ next_page_token="ghi",
+ ),
+ bigtable_table_admin.ListTablesResponse(
+ tables=[
+ table.Table(),
+ table.Table(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ bigtable_table_admin.ListTablesResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/instances/sample2"}
+
+ pager = client.list_tables(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, table.Table) for i in results)
+
+ pages = list(client.list_tables(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
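The pager test above relies on the standard page-token loop: keep fetching while next_page_token is non-empty and flatten the items across pages. A minimal sketch of that loop with made-up page dicts:

def iterate_items(fetch_page):
    # Pull pages until the server signals the end with an empty token.
    token = ""
    while True:
        page = fetch_page(token)
        yield from page["tables"]
        token = page["next_page_token"]
        if not token:
            break

pages = iter([
    {"tables": ["t1", "t2", "t3"], "next_page_token": "abc"},
    {"tables": [], "next_page_token": "def"},
    {"tables": ["t4"], "next_page_token": "ghi"},
    {"tables": ["t5", "t6"], "next_page_token": ""},
])
results = list(iterate_items(lambda _token: next(pages)))
assert len(results) == 6  # six tables spread over four pages, as in the test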
+def test_get_table_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14295,35 +14865,30 @@ def test_delete_authorized_view_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert (
- client._transport.delete_authorized_view
- in client._transport._wrapped_methods
- )
+ assert client._transport.get_table in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[
- client._transport.delete_authorized_view
- ] = mock_rpc
+ client._transport._wrapped_methods[client._transport.get_table] = mock_rpc
request = {}
- client.delete_authorized_view(request)
+ client.get_table(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.delete_authorized_view(request)
+ client.get_table(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_delete_authorized_view_rest_required_fields(
- request_type=bigtable_table_admin.DeleteAuthorizedViewRequest,
+def test_get_table_rest_required_fields(
+ request_type=bigtable_table_admin.GetTableRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
@@ -14339,7 +14904,7 @@ def test_delete_authorized_view_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).delete_authorized_view._get_unset_required_fields(jsonified_request)
+ ).get_table._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
@@ -14348,23 +14913,23 @@ def test_delete_authorized_view_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).delete_authorized_view._get_unset_required_fields(jsonified_request)
+ ).get_table._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
- assert not set(unset_fields) - set(("etag",))
+ assert not set(unset_fields) - set(("view",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "name" in jsonified_request
assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = None
+ return_value = table.Table()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -14376,37 +14941,40 @@ def test_delete_authorized_view_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "delete",
+ "method": "get",
"query_params": pb_request,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
- json_return_value = ""
+
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.delete_authorized_view(request)
+ response = client.get_table(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_delete_authorized_view_rest_unset_required_fields():
+def test_get_table_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.delete_authorized_view._get_unset_required_fields({})
- assert set(unset_fields) == (set(("etag",)) & set(("name",)))
+ unset_fields = transport.get_table._get_unset_required_fields({})
+ assert set(unset_fields) == (set(("view",)) & set(("name",)))
-def test_delete_authorized_view_rest_flattened():
- client = BigtableTableAdminClient(
+def test_get_table_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14414,12 +14982,10 @@ def test_delete_authorized_view_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = None
+ return_value = table.Table()
# get arguments that satisfy an http rule for this method
- sample_request = {
- "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
- }
+ sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
# get truthy value for each flattened field
mock_args = dict(
@@ -14430,26 +14996,27 @@ def test_delete_authorized_view_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- json_return_value = ""
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.delete_authorized_view(**mock_args)
+ client.get_table(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}"
- % client.transport._host,
+ "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host,
args[1],
)
-def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_get_table_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -14457,17 +15024,17 @@ def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.delete_authorized_view(
- bigtable_table_admin.DeleteAuthorizedViewRequest(),
+ client.get_table(
+ bigtable_table_admin.GetTableRequest(),
name="name_value",
)
-def test_modify_column_families_rest_use_cached_wrapped_rpc():
+def test_update_table_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14477,40 +15044,38 @@ def test_modify_column_families_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert (
- client._transport.modify_column_families
- in client._transport._wrapped_methods
- )
+ assert client._transport.update_table in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[
- client._transport.modify_column_families
- ] = mock_rpc
+ client._transport._wrapped_methods[client._transport.update_table] = mock_rpc
request = {}
- client.modify_column_families(request)
+ client.update_table(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.modify_column_families(request)
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_table(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_modify_column_families_rest_required_fields(
- request_type=bigtable_table_admin.ModifyColumnFamiliesRequest,
+def test_update_table_rest_required_fields(
+ request_type=bigtable_table_admin.UpdateTableRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
- request_init["name"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -14521,30 +15086,33 @@ def test_modify_column_families_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).modify_column_families._get_unset_required_fields(jsonified_request)
+ ).update_table._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
- jsonified_request["name"] = "name_value"
-
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).modify_column_families._get_unset_required_fields(jsonified_request)
+ ).update_table._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "ignore_warnings",
+ "update_mask",
+ )
+ )
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
- assert "name" in jsonified_request
- assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = table.Table()
+ return_value = operations_pb2.Operation(name="operations/spam")
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -14556,7 +15124,7 @@ def test_modify_column_families_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "post",
+ "method": "patch",
"query_params": pb_request,
}
transcode_result["body"] = pb_request
@@ -14564,41 +15132,43 @@ def test_modify_column_families_rest_required_fields(
response_value = Response()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = table.Table.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.modify_column_families(request)
+ response = client.update_table(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_modify_column_families_rest_unset_required_fields():
+def test_update_table_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.modify_column_families._get_unset_required_fields({})
+ unset_fields = transport.update_table._get_unset_required_fields({})
assert set(unset_fields) == (
- set(())
+ set(
+ (
+ "ignoreWarnings",
+ "updateMask",
+ )
+ )
& set(
(
- "name",
- "modifications",
+ "table",
+ "updateMask",
)
)
)
-def test_modify_column_families_rest_flattened():
- client = BigtableTableAdminClient(
+def test_update_table_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14606,47 +15176,43 @@ def test_modify_column_families_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = table.Table()
+ return_value = operations_pb2.Operation(name="operations/spam")
# get arguments that satisfy an http rule for this method
- sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ sample_request = {
+ "table": {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ }
# get truthy value for each flattened field
mock_args = dict(
- name="name_value",
- modifications=[
- bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(
- id="id_value"
- )
- ],
+ table=gba_table.Table(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- # Convert return value to protobuf type
- return_value = table.Table.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.modify_column_families(**mock_args)
+ client.update_table(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies"
+ "%s/v2/{table.name=projects/*/instances/*/tables/*}"
% client.transport._host,
args[1],
)
-def test_modify_column_families_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_update_table_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -14654,22 +15220,18 @@ def test_modify_column_families_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.modify_column_families(
- bigtable_table_admin.ModifyColumnFamiliesRequest(),
- name="name_value",
- modifications=[
- bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(
- id="id_value"
- )
- ],
+ client.update_table(
+ bigtable_table_admin.UpdateTableRequest(),
+ table=gba_table.Table(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
-def test_drop_row_range_rest_use_cached_wrapped_rpc():
+def test_delete_table_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14679,31 +15241,31 @@ def test_drop_row_range_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.drop_row_range in client._transport._wrapped_methods
+ assert client._transport.delete_table in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc
+ client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc
request = {}
- client.drop_row_range(request)
+ client.delete_table(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.drop_row_range(request)
+ client.delete_table(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_drop_row_range_rest_required_fields(
- request_type=bigtable_table_admin.DropRowRangeRequest,
-):
+def test_delete_table_rest_required_fields(
+ request_type=bigtable_table_admin.DeleteTableRequest,
+):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
@@ -14718,7 +15280,7 @@ def test_drop_row_range_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).drop_row_range._get_unset_required_fields(jsonified_request)
+ ).delete_table._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
@@ -14727,14 +15289,14 @@ def test_drop_row_range_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).drop_row_range._get_unset_required_fields(jsonified_request)
+ ).delete_table._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "name" in jsonified_request
assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14753,10 +15315,9 @@ def test_drop_row_range_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "post",
+ "method": "delete",
"query_params": pb_request,
}
- transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
@@ -14767,27 +15328,82 @@ def test_drop_row_range_rest_required_fields(
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.drop_row_range(request)
+ response = client.delete_table(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_drop_row_range_rest_unset_required_fields():
+def test_delete_table_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.drop_row_range._get_unset_required_fields({})
+ unset_fields = transport.delete_table._get_unset_required_fields({})
assert set(unset_fields) == (set(()) & set(("name",)))
-def test_generate_consistency_token_rest_use_cached_wrapped_rpc():
+def test_delete_table_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_table(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_table_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_table(
+ bigtable_table_admin.DeleteTableRequest(),
+ name="name_value",
+ )
+
+
+def test_undelete_table_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14797,35 +15413,34 @@ def test_generate_consistency_token_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert (
- client._transport.generate_consistency_token
- in client._transport._wrapped_methods
- )
+ assert client._transport.undelete_table in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[
- client._transport.generate_consistency_token
- ] = mock_rpc
+ client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc
request = {}
- client.generate_consistency_token(request)
+ client.undelete_table(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.generate_consistency_token(request)
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.undelete_table(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_generate_consistency_token_rest_required_fields(
- request_type=bigtable_table_admin.GenerateConsistencyTokenRequest,
+def test_undelete_table_rest_required_fields(
+ request_type=bigtable_table_admin.UndeleteTableRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
@@ -14841,7 +15456,7 @@ def test_generate_consistency_token_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).generate_consistency_token._get_unset_required_fields(jsonified_request)
+ ).undelete_table._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
@@ -14850,21 +15465,21 @@ def test_generate_consistency_token_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).generate_consistency_token._get_unset_required_fields(jsonified_request)
+ ).undelete_table._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "name" in jsonified_request
assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
+ return_value = operations_pb2.Operation(name="operations/spam")
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -14884,35 +15499,30 @@ def test_generate_consistency_token_rest_required_fields(
response_value = Response()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
- return_value
- )
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.generate_consistency_token(request)
+ response = client.undelete_table(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_generate_consistency_token_rest_unset_required_fields():
+def test_undelete_table_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.generate_consistency_token._get_unset_required_fields({})
+ unset_fields = transport.undelete_table._get_unset_required_fields({})
assert set(unset_fields) == (set(()) & set(("name",)))
-def test_generate_consistency_token_rest_flattened():
- client = BigtableTableAdminClient(
+def test_undelete_table_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14920,7 +15530,7 @@ def test_generate_consistency_token_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
+ return_value = operations_pb2.Operation(name="operations/spam")
# get arguments that satisfy an http rule for this method
sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
@@ -14934,30 +15544,26 @@ def test_generate_consistency_token_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
- return_value
- )
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.generate_consistency_token(**mock_args)
+ client.undelete_table(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken"
+ "%s/v2/{name=projects/*/instances/*/tables/*}:undelete"
% client.transport._host,
args[1],
)
-def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_undelete_table_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -14965,17 +15571,17 @@ def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.generate_consistency_token(
- bigtable_table_admin.GenerateConsistencyTokenRequest(),
+ client.undelete_table(
+ bigtable_table_admin.UndeleteTableRequest(),
name="name_value",
)
-def test_check_consistency_rest_use_cached_wrapped_rpc():
+def test_create_authorized_view_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -14985,7 +15591,10 @@ def test_check_consistency_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.check_consistency in client._transport._wrapped_methods
+ assert (
+ client._transport.create_authorized_view
+ in client._transport._wrapped_methods
+ )
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
@@ -14993,30 +15602,34 @@ def test_check_consistency_rest_use_cached_wrapped_rpc():
"foo" # operation_request.operation in compute client(s) expect a string.
)
client._transport._wrapped_methods[
- client._transport.check_consistency
+ client._transport.create_authorized_view
] = mock_rpc
request = {}
- client.check_consistency(request)
+ client.create_authorized_view(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.check_consistency(request)
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_authorized_view(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_check_consistency_rest_required_fields(
- request_type=bigtable_table_admin.CheckConsistencyRequest,
+def test_create_authorized_view_rest_required_fields(
+ request_type=bigtable_table_admin.CreateAuthorizedViewRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
- request_init["name"] = ""
- request_init["consistency_token"] = ""
+ request_init["parent"] = ""
+ request_init["authorized_view_id"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -15024,36 +15637,41 @@ def test_check_consistency_rest_required_fields(
)
# verify fields with default values are dropped
+ assert "authorizedViewId" not in jsonified_request
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).check_consistency._get_unset_required_fields(jsonified_request)
+ ).create_authorized_view._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
+ assert "authorizedViewId" in jsonified_request
+ assert jsonified_request["authorizedViewId"] == request_init["authorized_view_id"]
- jsonified_request["name"] = "name_value"
- jsonified_request["consistencyToken"] = "consistency_token_value"
+ jsonified_request["parent"] = "parent_value"
+ jsonified_request["authorizedViewId"] = "authorized_view_id_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).check_consistency._get_unset_required_fields(jsonified_request)
+ ).create_authorized_view._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("authorized_view_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
- assert "name" in jsonified_request
- assert jsonified_request["name"] == "name_value"
- assert "consistencyToken" in jsonified_request
- assert jsonified_request["consistencyToken"] == "consistency_token_value"
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+ assert "authorizedViewId" in jsonified_request
+ assert jsonified_request["authorizedViewId"] == "authorized_view_id_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.CheckConsistencyResponse()
+ return_value = operations_pb2.Operation(name="operations/spam")
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -15073,43 +15691,45 @@ def test_check_consistency_rest_required_fields(
response_value = Response()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.CheckConsistencyResponse.pb(
- return_value
- )
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.check_consistency(request)
+ response = client.create_authorized_view(request)
- expected_params = [("$alt", "json;enum-encoding=int")]
+ expected_params = [
+ (
+ "authorizedViewId",
+ "",
+ ),
+ ("$alt", "json;enum-encoding=int"),
+ ]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_check_consistency_rest_unset_required_fields():
+def test_create_authorized_view_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.check_consistency._get_unset_required_fields({})
+ unset_fields = transport.create_authorized_view._get_unset_required_fields({})
assert set(unset_fields) == (
- set(())
+ set(("authorizedViewId",))
& set(
(
- "name",
- "consistencyToken",
+ "parent",
+ "authorizedViewId",
+ "authorizedView",
)
)
)
-def test_check_consistency_rest_flattened():
- client = BigtableTableAdminClient(
+def test_create_authorized_view_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -15117,43 +15737,42 @@ def test_check_consistency_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.CheckConsistencyResponse()
+ return_value = operations_pb2.Operation(name="operations/spam")
# get arguments that satisfy an http rule for this method
- sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
# get truthy value for each flattened field
mock_args = dict(
- name="name_value",
- consistency_token="consistency_token_value",
+ parent="parent_value",
+ authorized_view=table.AuthorizedView(name="name_value"),
+ authorized_view_id="authorized_view_id_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.check_consistency(**mock_args)
+ client.create_authorized_view(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency"
+ "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews"
% client.transport._host,
args[1],
)
-def test_check_consistency_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_create_authorized_view_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -15161,18 +15780,19 @@ def test_check_consistency_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.check_consistency(
- bigtable_table_admin.CheckConsistencyRequest(),
- name="name_value",
- consistency_token="consistency_token_value",
+ client.create_authorized_view(
+ bigtable_table_admin.CreateAuthorizedViewRequest(),
+ parent="parent_value",
+ authorized_view=table.AuthorizedView(name="name_value"),
+ authorized_view_id="authorized_view_id_value",
)
-def test_snapshot_table_rest_use_cached_wrapped_rpc():
+def test_list_authorized_views_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -15182,41 +15802,40 @@ def test_snapshot_table_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.snapshot_table in client._transport._wrapped_methods
+ assert (
+ client._transport.list_authorized_views
+ in client._transport._wrapped_methods
+ )
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc
+ client._transport._wrapped_methods[
+ client._transport.list_authorized_views
+ ] = mock_rpc
request = {}
- client.snapshot_table(request)
+ client.list_authorized_views(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- # Operation methods build a cached wrapper on first rpc call
- # subsequent calls should use the cached wrapper
- wrapper_fn.reset_mock()
-
- client.snapshot_table(request)
+ client.list_authorized_views(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_snapshot_table_rest_required_fields(
- request_type=bigtable_table_admin.SnapshotTableRequest,
+def test_list_authorized_views_rest_required_fields(
+ request_type=bigtable_table_admin.ListAuthorizedViewsRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
- request_init["name"] = ""
- request_init["cluster"] = ""
- request_init["snapshot_id"] = ""
+ request_init["parent"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -15227,36 +15846,38 @@ def test_snapshot_table_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).snapshot_table._get_unset_required_fields(jsonified_request)
+ ).list_authorized_views._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
- jsonified_request["name"] = "name_value"
- jsonified_request["cluster"] = "cluster_value"
- jsonified_request["snapshotId"] = "snapshot_id_value"
+ jsonified_request["parent"] = "parent_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).snapshot_table._get_unset_required_fields(jsonified_request)
+ ).list_authorized_views._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "page_size",
+ "page_token",
+ "view",
+ )
+ )
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
- assert "name" in jsonified_request
- assert jsonified_request["name"] == "name_value"
- assert "cluster" in jsonified_request
- assert jsonified_request["cluster"] == "cluster_value"
- assert "snapshotId" in jsonified_request
- assert jsonified_request["snapshotId"] == "snapshot_id_value"
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = bigtable_table_admin.ListAuthorizedViewsResponse()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -15268,47 +15889,51 @@ def test_snapshot_table_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "post",
+ "method": "get",
"query_params": pb_request,
}
- transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(
+ return_value
+ )
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.snapshot_table(request)
+ response = client.list_authorized_views(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_snapshot_table_rest_unset_required_fields():
+def test_list_authorized_views_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.snapshot_table._get_unset_required_fields({})
+ unset_fields = transport.list_authorized_views._get_unset_required_fields({})
assert set(unset_fields) == (
- set(())
- & set(
+ set(
(
- "name",
- "cluster",
- "snapshotId",
+ "pageSize",
+ "pageToken",
+ "view",
)
)
+ & set(("parent",))
)
-def test_snapshot_table_rest_flattened():
- client = BigtableTableAdminClient(
+def test_list_authorized_views_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -15316,43 +15941,42 @@ def test_snapshot_table_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = bigtable_table_admin.ListAuthorizedViewsResponse()
# get arguments that satisfy an http rule for this method
- sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
# get truthy value for each flattened field
mock_args = dict(
- name="name_value",
- cluster="cluster_value",
- snapshot_id="snapshot_id_value",
- description="description_value",
+ parent="parent_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.snapshot_table(**mock_args)
+ client.list_authorized_views(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/tables/*}:snapshot"
+ "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews"
% client.transport._host,
args[1],
)
-def test_snapshot_table_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_list_authorized_views_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -15360,20 +15984,81 @@ def test_snapshot_table_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.snapshot_table(
- bigtable_table_admin.SnapshotTableRequest(),
- name="name_value",
- cluster="cluster_value",
- snapshot_id="snapshot_id_value",
- description="description_value",
+ client.list_authorized_views(
+ bigtable_table_admin.ListAuthorizedViewsRequest(),
+ parent="parent_value",
)
-def test_get_snapshot_rest_use_cached_wrapped_rpc():
+def test_list_authorized_views_rest_pager(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ bigtable_table_admin.ListAuthorizedViewsResponse(
+ authorized_views=[
+ table.AuthorizedView(),
+ table.AuthorizedView(),
+ table.AuthorizedView(),
+ ],
+ next_page_token="abc",
+ ),
+ bigtable_table_admin.ListAuthorizedViewsResponse(
+ authorized_views=[],
+ next_page_token="def",
+ ),
+ bigtable_table_admin.ListAuthorizedViewsResponse(
+ authorized_views=[
+ table.AuthorizedView(),
+ ],
+ next_page_token="ghi",
+ ),
+ bigtable_table_admin.ListAuthorizedViewsResponse(
+ authorized_views=[
+ table.AuthorizedView(),
+ table.AuthorizedView(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ bigtable_table_admin.ListAuthorizedViewsResponse.to_json(x)
+ for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
+
+ pager = client.list_authorized_views(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, table.AuthorizedView) for i in results)
+
+ pages = list(client.list_authorized_views(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_get_authorized_view_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -15383,30 +16068,34 @@ def test_get_snapshot_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.get_snapshot in client._transport._wrapped_methods
+ assert (
+ client._transport.get_authorized_view in client._transport._wrapped_methods
+ )
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc
+ client._transport._wrapped_methods[
+ client._transport.get_authorized_view
+ ] = mock_rpc
request = {}
- client.get_snapshot(request)
+ client.get_authorized_view(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.get_snapshot(request)
+ client.get_authorized_view(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_get_snapshot_rest_required_fields(
- request_type=bigtable_table_admin.GetSnapshotRequest,
+def test_get_authorized_view_rest_required_fields(
+ request_type=bigtable_table_admin.GetAuthorizedViewRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
@@ -15422,7 +16111,7 @@ def test_get_snapshot_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).get_snapshot._get_unset_required_fields(jsonified_request)
+ ).get_authorized_view._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
@@ -15431,21 +16120,23 @@ def test_get_snapshot_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).get_snapshot._get_unset_required_fields(jsonified_request)
+ ).get_authorized_view._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("view",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "name" in jsonified_request
assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = table.Snapshot()
+ return_value = table.AuthorizedView()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -15466,31 +16157,31 @@ def test_get_snapshot_rest_required_fields(
response_value.status_code = 200
# Convert return value to protobuf type
- return_value = table.Snapshot.pb(return_value)
+ return_value = table.AuthorizedView.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.get_snapshot(request)
+ response = client.get_authorized_view(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_get_snapshot_rest_unset_required_fields():
+def test_get_authorized_view_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.get_snapshot._get_unset_required_fields({})
- assert set(unset_fields) == (set(()) & set(("name",)))
+ unset_fields = transport.get_authorized_view._get_unset_required_fields({})
+ assert set(unset_fields) == (set(("view",)) & set(("name",)))
-def test_get_snapshot_rest_flattened():
- client = BigtableTableAdminClient(
+def test_get_authorized_view_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -15498,11 +16189,11 @@ def test_get_snapshot_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = table.Snapshot()
+ return_value = table.AuthorizedView()
# get arguments that satisfy an http rule for this method
sample_request = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
+ "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
}
# get truthy value for each flattened field
@@ -15515,27 +16206,27 @@ def test_get_snapshot_rest_flattened():
response_value = Response()
response_value.status_code = 200
# Convert return value to protobuf type
- return_value = table.Snapshot.pb(return_value)
+ return_value = table.AuthorizedView.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.get_snapshot(**mock_args)
+ client.get_authorized_view(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}"
+ "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}"
% client.transport._host,
args[1],
)
-def test_get_snapshot_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_get_authorized_view_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -15543,17 +16234,17 @@ def test_get_snapshot_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.get_snapshot(
- bigtable_table_admin.GetSnapshotRequest(),
+ client.get_authorized_view(
+ bigtable_table_admin.GetAuthorizedViewRequest(),
name="name_value",
)
-def test_list_snapshots_rest_use_cached_wrapped_rpc():
+def test_update_authorized_view_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -15563,35 +16254,43 @@ def test_list_snapshots_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.list_snapshots in client._transport._wrapped_methods
+ assert (
+ client._transport.update_authorized_view
+ in client._transport._wrapped_methods
+ )
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc
+ client._transport._wrapped_methods[
+ client._transport.update_authorized_view
+ ] = mock_rpc
request = {}
- client.list_snapshots(request)
+ client.update_authorized_view(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.list_snapshots(request)
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_authorized_view(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_list_snapshots_rest_required_fields(
- request_type=bigtable_table_admin.ListSnapshotsRequest,
+def test_update_authorized_view_rest_required_fields(
+ request_type=bigtable_table_admin.UpdateAuthorizedViewRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
- request_init["parent"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -15602,37 +16301,33 @@ def test_list_snapshots_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).list_snapshots._get_unset_required_fields(jsonified_request)
+ ).update_authorized_view._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
- jsonified_request["parent"] = "parent_value"
-
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).list_snapshots._get_unset_required_fields(jsonified_request)
+ ).update_authorized_view._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(
(
- "page_size",
- "page_token",
+ "ignore_warnings",
+ "update_mask",
)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
- assert "parent" in jsonified_request
- assert jsonified_request["parent"] == "parent_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListSnapshotsResponse()
+ return_value = operations_pb2.Operation(name="operations/spam")
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -15644,48 +16339,46 @@ def test_list_snapshots_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "get",
+ "method": "patch",
"query_params": pb_request,
}
+ transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.list_snapshots(request)
+ response = client.update_authorized_view(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_list_snapshots_rest_unset_required_fields():
+def test_update_authorized_view_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.list_snapshots._get_unset_required_fields({})
+ unset_fields = transport.update_authorized_view._get_unset_required_fields({})
assert set(unset_fields) == (
set(
(
- "pageSize",
- "pageToken",
+ "ignoreWarnings",
+ "updateMask",
)
)
- & set(("parent",))
+ & set(("authorizedView",))
)
-def test_list_snapshots_rest_flattened():
- client = BigtableTableAdminClient(
+def test_update_authorized_view_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -15693,44 +16386,45 @@ def test_list_snapshots_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListSnapshotsResponse()
+ return_value = operations_pb2.Operation(name="operations/spam")
# get arguments that satisfy an http rule for this method
sample_request = {
- "parent": "projects/sample1/instances/sample2/clusters/sample3"
+ "authorized_view": {
+ "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ }
}
# get truthy value for each flattened field
mock_args = dict(
- parent="parent_value",
+ authorized_view=table.AuthorizedView(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.list_snapshots(**mock_args)
+ client.update_authorized_view(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots"
+ "%s/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}"
% client.transport._host,
args[1],
)
-def test_list_snapshots_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_update_authorized_view_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -15738,82 +16432,18 @@ def test_list_snapshots_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.list_snapshots(
- bigtable_table_admin.ListSnapshotsRequest(),
- parent="parent_value",
- )
-
-
-def test_list_snapshots_rest_pager(transport: str = "rest"):
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # TODO(kbandes): remove this mock unless there's a good reason for it.
- # with mock.patch.object(path_template, 'transcode') as transcode:
- # Set the response as a series of pages
- response = (
- bigtable_table_admin.ListSnapshotsResponse(
- snapshots=[
- table.Snapshot(),
- table.Snapshot(),
- table.Snapshot(),
- ],
- next_page_token="abc",
- ),
- bigtable_table_admin.ListSnapshotsResponse(
- snapshots=[],
- next_page_token="def",
- ),
- bigtable_table_admin.ListSnapshotsResponse(
- snapshots=[
- table.Snapshot(),
- ],
- next_page_token="ghi",
- ),
- bigtable_table_admin.ListSnapshotsResponse(
- snapshots=[
- table.Snapshot(),
- table.Snapshot(),
- ],
- ),
- )
- # Two responses for two calls
- response = response + response
-
- # Wrap the values into proper Response objs
- response = tuple(
- bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response
+ client.update_authorized_view(
+ bigtable_table_admin.UpdateAuthorizedViewRequest(),
+ authorized_view=table.AuthorizedView(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
- return_values = tuple(Response() for i in response)
- for return_val, response_val in zip(return_values, response):
- return_val._content = response_val.encode("UTF-8")
- return_val.status_code = 200
- req.side_effect = return_values
-
- sample_request = {
- "parent": "projects/sample1/instances/sample2/clusters/sample3"
- }
-
- pager = client.list_snapshots(request=sample_request)
-
- results = list(pager)
- assert len(results) == 6
- assert all(isinstance(i, table.Snapshot) for i in results)
-
- pages = list(client.list_snapshots(request=sample_request).pages)
- for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
- assert page_.raw_page.next_page_token == token
-def test_delete_snapshot_rest_use_cached_wrapped_rpc():
+def test_delete_authorized_view_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -15823,30 +16453,35 @@ def test_delete_snapshot_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.delete_snapshot in client._transport._wrapped_methods
+ assert (
+ client._transport.delete_authorized_view
+ in client._transport._wrapped_methods
+ )
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc
+ client._transport._wrapped_methods[
+ client._transport.delete_authorized_view
+ ] = mock_rpc
request = {}
- client.delete_snapshot(request)
+ client.delete_authorized_view(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.delete_snapshot(request)
+ client.delete_authorized_view(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_delete_snapshot_rest_required_fields(
- request_type=bigtable_table_admin.DeleteSnapshotRequest,
+def test_delete_authorized_view_rest_required_fields(
+ request_type=bigtable_table_admin.DeleteAuthorizedViewRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
@@ -15862,7 +16497,7 @@ def test_delete_snapshot_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).delete_snapshot._get_unset_required_fields(jsonified_request)
+ ).delete_authorized_view._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
@@ -15871,14 +16506,16 @@ def test_delete_snapshot_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).delete_snapshot._get_unset_required_fields(jsonified_request)
+ ).delete_authorized_view._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("etag",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "name" in jsonified_request
assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -15910,24 +16547,24 @@ def test_delete_snapshot_rest_required_fields(
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.delete_snapshot(request)
+ response = client.delete_authorized_view(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_delete_snapshot_rest_unset_required_fields():
+def test_delete_authorized_view_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.delete_snapshot._get_unset_required_fields({})
- assert set(unset_fields) == (set(()) & set(("name",)))
+ unset_fields = transport.delete_authorized_view._get_unset_required_fields({})
+ assert set(unset_fields) == (set(("etag",)) & set(("name",)))
-def test_delete_snapshot_rest_flattened():
- client = BigtableTableAdminClient(
+def test_delete_authorized_view_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -15939,7 +16576,7 @@ def test_delete_snapshot_rest_flattened():
# get arguments that satisfy an http rule for this method
sample_request = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
+ "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
}
# get truthy value for each flattened field
@@ -15956,21 +16593,21 @@ def test_delete_snapshot_rest_flattened():
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.delete_snapshot(**mock_args)
+ client.delete_authorized_view(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}"
+ "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}"
% client.transport._host,
args[1],
)
-def test_delete_snapshot_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -15978,17 +16615,17 @@ def test_delete_snapshot_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.delete_snapshot(
- bigtable_table_admin.DeleteSnapshotRequest(),
+ client.delete_authorized_view(
+ bigtable_table_admin.DeleteAuthorizedViewRequest(),
name="name_value",
)
-def test_create_backup_rest_use_cached_wrapped_rpc():
+def test_modify_column_families_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -15998,40 +16635,40 @@ def test_create_backup_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.create_backup in client._transport._wrapped_methods
+ assert (
+ client._transport.modify_column_families
+ in client._transport._wrapped_methods
+ )
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc
+ client._transport._wrapped_methods[
+ client._transport.modify_column_families
+ ] = mock_rpc
request = {}
- client.create_backup(request)
+ client.modify_column_families(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- # Operation methods build a cached wrapper on first rpc call
- # subsequent calls should use the cached wrapper
- wrapper_fn.reset_mock()
-
- client.create_backup(request)
+ client.modify_column_families(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_create_backup_rest_required_fields(
- request_type=bigtable_table_admin.CreateBackupRequest,
+def test_modify_column_families_rest_required_fields(
+ request_type=bigtable_table_admin.ModifyColumnFamiliesRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
- request_init["parent"] = ""
- request_init["backup_id"] = ""
+ request_init["name"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -16039,41 +16676,33 @@ def test_create_backup_rest_required_fields(
)
# verify fields with default values are dropped
- assert "backupId" not in jsonified_request
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).create_backup._get_unset_required_fields(jsonified_request)
+ ).modify_column_families._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
- assert "backupId" in jsonified_request
- assert jsonified_request["backupId"] == request_init["backup_id"]
- jsonified_request["parent"] = "parent_value"
- jsonified_request["backupId"] = "backup_id_value"
+ jsonified_request["name"] = "name_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).create_backup._get_unset_required_fields(jsonified_request)
- # Check that path parameters and body parameters are not mixing in.
- assert not set(unset_fields) - set(("backup_id",))
+ ).modify_column_families._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
- assert "parent" in jsonified_request
- assert jsonified_request["parent"] == "parent_value"
- assert "backupId" in jsonified_request
- assert jsonified_request["backupId"] == "backup_id_value"
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = table.Table()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -16093,45 +16722,41 @@ def test_create_backup_rest_required_fields(
response_value = Response()
response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.create_backup(request)
+ response = client.modify_column_families(request)
- expected_params = [
- (
- "backupId",
- "",
- ),
- ("$alt", "json;enum-encoding=int"),
- ]
+ expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_create_backup_rest_unset_required_fields():
+def test_modify_column_families_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.create_backup._get_unset_required_fields({})
+ unset_fields = transport.modify_column_families._get_unset_required_fields({})
assert set(unset_fields) == (
- set(("backupId",))
+ set(())
& set(
(
- "parent",
- "backupId",
- "backup",
+ "name",
+ "modifications",
)
)
)
-def test_create_backup_rest_flattened():
- client = BigtableTableAdminClient(
+def test_modify_column_families_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -16139,44 +16764,47 @@ def test_create_backup_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = table.Table()
# get arguments that satisfy an http rule for this method
- sample_request = {
- "parent": "projects/sample1/instances/sample2/clusters/sample3"
- }
+ sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
# get truthy value for each flattened field
mock_args = dict(
- parent="parent_value",
- backup_id="backup_id_value",
- backup=table.Backup(name="name_value"),
+ name="name_value",
+ modifications=[
+ bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(
+ id="id_value"
+ )
+ ],
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.create_backup(**mock_args)
+ client.modify_column_families(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups"
+ "%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies"
% client.transport._host,
args[1],
)
-def test_create_backup_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_modify_column_families_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -16184,19 +16812,22 @@ def test_create_backup_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.create_backup(
- bigtable_table_admin.CreateBackupRequest(),
- parent="parent_value",
- backup_id="backup_id_value",
- backup=table.Backup(name="name_value"),
+ client.modify_column_families(
+ bigtable_table_admin.ModifyColumnFamiliesRequest(),
+ name="name_value",
+ modifications=[
+ bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(
+ id="id_value"
+ )
+ ],
)
-def test_get_backup_rest_use_cached_wrapped_rpc():
+def test_drop_row_range_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -16206,30 +16837,30 @@ def test_get_backup_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.get_backup in client._transport._wrapped_methods
+ assert client._transport.drop_row_range in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc
+ client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc
request = {}
- client.get_backup(request)
+ client.drop_row_range(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.get_backup(request)
+ client.drop_row_range(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_get_backup_rest_required_fields(
- request_type=bigtable_table_admin.GetBackupRequest,
+def test_drop_row_range_rest_required_fields(
+ request_type=bigtable_table_admin.DropRowRangeRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
@@ -16245,7 +16876,7 @@ def test_get_backup_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).get_backup._get_unset_required_fields(jsonified_request)
+ ).drop_row_range._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
@@ -16254,21 +16885,21 @@ def test_get_backup_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).get_backup._get_unset_required_fields(jsonified_request)
+ ).drop_row_range._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "name" in jsonified_request
assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = table.Backup()
+ return_value = None
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -16280,103 +16911,41 @@ def test_get_backup_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "get",
+ "method": "post",
"query_params": pb_request,
}
+ transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = table.Backup.pb(return_value)
- json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = ""
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.get_backup(request)
+ response = client.drop_row_range(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_get_backup_rest_unset_required_fields():
+def test_drop_row_range_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.get_backup._get_unset_required_fields({})
+ unset_fields = transport.drop_row_range._get_unset_required_fields({})
assert set(unset_fields) == (set(()) & set(("name",)))
-def test_get_backup_rest_flattened():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="rest",
- )
-
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(type(client.transport._session), "request") as req:
- # Designate an appropriate value for the returned response.
- return_value = table.Backup()
-
- # get arguments that satisfy an http rule for this method
- sample_request = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
- }
-
- # get truthy value for each flattened field
- mock_args = dict(
- name="name_value",
- )
- mock_args.update(sample_request)
-
- # Wrap the value into a proper Response obj
- response_value = Response()
- response_value.status_code = 200
- # Convert return value to protobuf type
- return_value = table.Backup.pb(return_value)
- json_return_value = json_format.MessageToJson(return_value)
- response_value._content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
-
- client.get_backup(**mock_args)
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(req.mock_calls) == 1
- _, args, _ = req.mock_calls[0]
- assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}"
- % client.transport._host,
- args[1],
- )
-
-
-def test_get_backup_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_backup(
- bigtable_table_admin.GetBackupRequest(),
- name="name_value",
- )
-
-
-def test_update_backup_rest_use_cached_wrapped_rpc():
+def test_generate_consistency_token_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -16386,34 +16955,40 @@ def test_update_backup_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.update_backup in client._transport._wrapped_methods
+ assert (
+ client._transport.generate_consistency_token
+ in client._transport._wrapped_methods
+ )
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc
+ client._transport._wrapped_methods[
+ client._transport.generate_consistency_token
+ ] = mock_rpc
request = {}
- client.update_backup(request)
+ client.generate_consistency_token(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.update_backup(request)
+ client.generate_consistency_token(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_update_backup_rest_required_fields(
- request_type=bigtable_table_admin.UpdateBackupRequest,
+def test_generate_consistency_token_rest_required_fields(
+ request_type=bigtable_table_admin.GenerateConsistencyTokenRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
+ request_init["name"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -16424,28 +16999,30 @@ def test_update_backup_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).update_backup._get_unset_required_fields(jsonified_request)
+ ).generate_consistency_token._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
+ jsonified_request["name"] = "name_value"
+
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).update_backup._get_unset_required_fields(jsonified_request)
- # Check that path parameters and body parameters are not mixing in.
- assert not set(unset_fields) - set(("update_mask",))
+ ).generate_consistency_token._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = table.Backup()
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -16457,7 +17034,7 @@ def test_update_backup_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "patch",
+ "method": "post",
"query_params": pb_request,
}
transcode_result["body"] = pb_request
@@ -16467,39 +17044,33 @@ def test_update_backup_rest_required_fields(
response_value.status_code = 200
# Convert return value to protobuf type
- return_value = table.Backup.pb(return_value)
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
+ return_value
+ )
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.update_backup(request)
+ response = client.generate_consistency_token(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_update_backup_rest_unset_required_fields():
+def test_generate_consistency_token_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.update_backup._get_unset_required_fields({})
- assert set(unset_fields) == (
- set(("updateMask",))
- & set(
- (
- "backup",
- "updateMask",
- )
- )
- )
+ unset_fields = transport.generate_consistency_token._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
-def test_update_backup_rest_flattened():
- client = BigtableTableAdminClient(
+def test_generate_consistency_token_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -16507,19 +17078,14 @@ def test_update_backup_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = table.Backup()
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
# get arguments that satisfy an http rule for this method
- sample_request = {
- "backup": {
- "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
- }
- }
+ sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
# get truthy value for each flattened field
mock_args = dict(
- backup=table.Backup(name="name_value"),
- update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ name="name_value",
)
mock_args.update(sample_request)
@@ -16527,27 +17093,29 @@ def test_update_backup_rest_flattened():
response_value = Response()
response_value.status_code = 200
# Convert return value to protobuf type
- return_value = table.Backup.pb(return_value)
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
+ return_value
+ )
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.update_backup(**mock_args)
+ client.generate_consistency_token(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}"
+ "%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken"
% client.transport._host,
args[1],
)
-def test_update_backup_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -16555,18 +17123,17 @@ def test_update_backup_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.update_backup(
- bigtable_table_admin.UpdateBackupRequest(),
- backup=table.Backup(name="name_value"),
- update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ client.generate_consistency_token(
+ bigtable_table_admin.GenerateConsistencyTokenRequest(),
+ name="name_value",
)
-def test_delete_backup_rest_use_cached_wrapped_rpc():
+def test_check_consistency_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -16576,35 +17143,38 @@ def test_delete_backup_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.delete_backup in client._transport._wrapped_methods
+ assert client._transport.check_consistency in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc
+ client._transport._wrapped_methods[
+ client._transport.check_consistency
+ ] = mock_rpc
request = {}
- client.delete_backup(request)
+ client.check_consistency(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.delete_backup(request)
+ client.check_consistency(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_delete_backup_rest_required_fields(
- request_type=bigtable_table_admin.DeleteBackupRequest,
+def test_check_consistency_rest_required_fields(
+ request_type=bigtable_table_admin.CheckConsistencyRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
request_init["name"] = ""
+ request_init["consistency_token"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -16615,30 +17185,33 @@ def test_delete_backup_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).delete_backup._get_unset_required_fields(jsonified_request)
+ ).check_consistency._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["name"] = "name_value"
+ jsonified_request["consistencyToken"] = "consistency_token_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).delete_backup._get_unset_required_fields(jsonified_request)
+ ).check_consistency._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "name" in jsonified_request
assert jsonified_request["name"] == "name_value"
+ assert "consistencyToken" in jsonified_request
+ assert jsonified_request["consistencyToken"] == "consistency_token_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = None
+ return_value = bigtable_table_admin.CheckConsistencyResponse()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -16650,37 +17223,51 @@ def test_delete_backup_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "delete",
+ "method": "post",
"query_params": pb_request,
}
+ transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
- json_return_value = ""
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.CheckConsistencyResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.delete_backup(request)
+ response = client.check_consistency(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_delete_backup_rest_unset_required_fields():
+def test_check_consistency_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.delete_backup._get_unset_required_fields({})
- assert set(unset_fields) == (set(()) & set(("name",)))
+ unset_fields = transport.check_consistency._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "name",
+ "consistencyToken",
+ )
+ )
+ )
-def test_delete_backup_rest_flattened():
- client = BigtableTableAdminClient(
+def test_check_consistency_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -16688,42 +17275,43 @@ def test_delete_backup_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = None
+ return_value = bigtable_table_admin.CheckConsistencyResponse()
# get arguments that satisfy an http rule for this method
- sample_request = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
- }
+ sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
# get truthy value for each flattened field
mock_args = dict(
name="name_value",
+ consistency_token="consistency_token_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- json_return_value = ""
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.delete_backup(**mock_args)
+ client.check_consistency(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}"
+ "%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency"
% client.transport._host,
args[1],
)
-def test_delete_backup_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_check_consistency_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -16731,17 +17319,18 @@ def test_delete_backup_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.delete_backup(
- bigtable_table_admin.DeleteBackupRequest(),
+ client.check_consistency(
+ bigtable_table_admin.CheckConsistencyRequest(),
name="name_value",
+ consistency_token="consistency_token_value",
)
-def test_list_backups_rest_use_cached_wrapped_rpc():
+def test_snapshot_table_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -16751,36 +17340,42 @@ def test_list_backups_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.list_backups in client._transport._wrapped_methods
+ assert client._transport.snapshot_table in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc
+ client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc
request = {}
- client.list_backups(request)
+ client.snapshot_table(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.list_backups(request)
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.snapshot_table(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_list_backups_rest_required_fields(
- request_type=bigtable_table_admin.ListBackupsRequest,
+def test_snapshot_table_rest_required_fields(
+ request_type=bigtable_table_admin.SnapshotTableRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
- request_init["parent"] = ""
- request = request_type(**request_init)
+ request_init["name"] = ""
+ request_init["cluster"] = ""
+ request_init["snapshot_id"] = ""
+ request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
json_format.MessageToJson(pb_request, use_integers_for_enums=False)
@@ -16790,39 +17385,36 @@ def test_list_backups_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).list_backups._get_unset_required_fields(jsonified_request)
+ ).snapshot_table._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
- jsonified_request["parent"] = "parent_value"
+ jsonified_request["name"] = "name_value"
+ jsonified_request["cluster"] = "cluster_value"
+ jsonified_request["snapshotId"] = "snapshot_id_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).list_backups._get_unset_required_fields(jsonified_request)
- # Check that path parameters and body parameters are not mixing in.
- assert not set(unset_fields) - set(
- (
- "filter",
- "order_by",
- "page_size",
- "page_token",
- )
- )
+ ).snapshot_table._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
- assert "parent" in jsonified_request
- assert jsonified_request["parent"] == "parent_value"
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+ assert "cluster" in jsonified_request
+ assert jsonified_request["cluster"] == "cluster_value"
+ assert "snapshotId" in jsonified_request
+ assert jsonified_request["snapshotId"] == "snapshot_id_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListBackupsResponse()
+ return_value = operations_pb2.Operation(name="operations/spam")
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -16834,50 +17426,47 @@ def test_list_backups_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "get",
+ "method": "post",
"query_params": pb_request,
}
+ transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.list_backups(request)
+ response = client.snapshot_table(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_list_backups_rest_unset_required_fields():
+def test_snapshot_table_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.list_backups._get_unset_required_fields({})
+ unset_fields = transport.snapshot_table._get_unset_required_fields({})
assert set(unset_fields) == (
- set(
+ set(())
+ & set(
(
- "filter",
- "orderBy",
- "pageSize",
- "pageToken",
+ "name",
+ "cluster",
+ "snapshotId",
)
)
- & set(("parent",))
)
-def test_list_backups_rest_flattened():
- client = BigtableTableAdminClient(
+def test_snapshot_table_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -16885,44 +17474,43 @@ def test_list_backups_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListBackupsResponse()
+ return_value = operations_pb2.Operation(name="operations/spam")
# get arguments that satisfy an http rule for this method
- sample_request = {
- "parent": "projects/sample1/instances/sample2/clusters/sample3"
- }
+ sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"}
# get truthy value for each flattened field
mock_args = dict(
- parent="parent_value",
+ name="name_value",
+ cluster="cluster_value",
+ snapshot_id="snapshot_id_value",
+ description="description_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.list_backups(**mock_args)
+ client.snapshot_table(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups"
+ "%s/v2/{name=projects/*/instances/*/tables/*}:snapshot"
% client.transport._host,
args[1],
)
-def test_list_backups_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_snapshot_table_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -16930,82 +17518,20 @@ def test_list_backups_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.list_backups(
- bigtable_table_admin.ListBackupsRequest(),
- parent="parent_value",
- )
-
-
-def test_list_backups_rest_pager(transport: str = "rest"):
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(Session, "request") as req:
- # TODO(kbandes): remove this mock unless there's a good reason for it.
- # with mock.patch.object(path_template, 'transcode') as transcode:
- # Set the response as a series of pages
- response = (
- bigtable_table_admin.ListBackupsResponse(
- backups=[
- table.Backup(),
- table.Backup(),
- table.Backup(),
- ],
- next_page_token="abc",
- ),
- bigtable_table_admin.ListBackupsResponse(
- backups=[],
- next_page_token="def",
- ),
- bigtable_table_admin.ListBackupsResponse(
- backups=[
- table.Backup(),
- ],
- next_page_token="ghi",
- ),
- bigtable_table_admin.ListBackupsResponse(
- backups=[
- table.Backup(),
- table.Backup(),
- ],
- ),
- )
- # Two responses for two calls
- response = response + response
-
- # Wrap the values into proper Response objs
- response = tuple(
- bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response
+ client.snapshot_table(
+ bigtable_table_admin.SnapshotTableRequest(),
+ name="name_value",
+ cluster="cluster_value",
+ snapshot_id="snapshot_id_value",
+ description="description_value",
)
- return_values = tuple(Response() for i in response)
- for return_val, response_val in zip(return_values, response):
- return_val._content = response_val.encode("UTF-8")
- return_val.status_code = 200
- req.side_effect = return_values
-
- sample_request = {
- "parent": "projects/sample1/instances/sample2/clusters/sample3"
- }
-
- pager = client.list_backups(request=sample_request)
-
- results = list(pager)
- assert len(results) == 6
- assert all(isinstance(i, table.Backup) for i in results)
-
- pages = list(client.list_backups(request=sample_request).pages)
- for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
- assert page_.raw_page.next_page_token == token
-def test_restore_table_rest_use_cached_wrapped_rpc():
+def test_get_snapshot_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -17015,40 +17541,35 @@ def test_restore_table_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.restore_table in client._transport._wrapped_methods
+ assert client._transport.get_snapshot in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc
+ client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc
request = {}
- client.restore_table(request)
+ client.get_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- # Operation methods build a cached wrapper on first rpc call
- # subsequent calls should use the cached wrapper
- wrapper_fn.reset_mock()
-
- client.restore_table(request)
+ client.get_snapshot(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_restore_table_rest_required_fields(
- request_type=bigtable_table_admin.RestoreTableRequest,
+def test_get_snapshot_rest_required_fields(
+ request_type=bigtable_table_admin.GetSnapshotRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
- request_init["parent"] = ""
- request_init["table_id"] = ""
+ request_init["name"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -17059,33 +17580,30 @@ def test_restore_table_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).restore_table._get_unset_required_fields(jsonified_request)
+ ).get_snapshot._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
- jsonified_request["parent"] = "parent_value"
- jsonified_request["tableId"] = "table_id_value"
+ jsonified_request["name"] = "name_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).restore_table._get_unset_required_fields(jsonified_request)
+ ).get_snapshot._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
- assert "parent" in jsonified_request
- assert jsonified_request["parent"] == "parent_value"
- assert "tableId" in jsonified_request
- assert jsonified_request["tableId"] == "table_id_value"
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = table.Snapshot()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -17097,49 +17615,103 @@ def test_restore_table_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "post",
+ "method": "get",
"query_params": pb_request,
}
- transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = table.Snapshot.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.restore_table(request)
+ response = client.get_snapshot(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_restore_table_rest_unset_required_fields():
+def test_get_snapshot_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.restore_table._get_unset_required_fields({})
- assert set(unset_fields) == (
- set(())
- & set(
- (
- "parent",
- "tableId",
- )
+ unset_fields = transport.get_snapshot._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_snapshot_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = table.Snapshot()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = table.Snapshot.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_snapshot(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}"
+ % client.transport._host,
+ args[1],
)
+
+
+def test_get_snapshot_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
)
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_snapshot(
+ bigtable_table_admin.GetSnapshotRequest(),
+ name="name_value",
+ )
+
-def test_copy_backup_rest_use_cached_wrapped_rpc():
+def test_list_snapshots_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -17149,41 +17721,35 @@ def test_copy_backup_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.copy_backup in client._transport._wrapped_methods
+ assert client._transport.list_snapshots in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc
+ client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc
request = {}
- client.copy_backup(request)
+ client.list_snapshots(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- # Operation methods build a cached wrapper on first rpc call
- # subsequent calls should use the cached wrapper
- wrapper_fn.reset_mock()
-
- client.copy_backup(request)
+ client.list_snapshots(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_copy_backup_rest_required_fields(
- request_type=bigtable_table_admin.CopyBackupRequest,
+def test_list_snapshots_rest_required_fields(
+ request_type=bigtable_table_admin.ListSnapshotsRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
request_init["parent"] = ""
- request_init["backup_id"] = ""
- request_init["source_backup"] = ""
request = request_type(**request_init)
pb_request = request_type.pb(request)
jsonified_request = json.loads(
@@ -17194,36 +17760,37 @@ def test_copy_backup_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).copy_backup._get_unset_required_fields(jsonified_request)
+ ).list_snapshots._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["parent"] = "parent_value"
- jsonified_request["backupId"] = "backup_id_value"
- jsonified_request["sourceBackup"] = "source_backup_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).copy_backup._get_unset_required_fields(jsonified_request)
+ ).list_snapshots._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "page_size",
+ "page_token",
+ )
+ )
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "parent" in jsonified_request
assert jsonified_request["parent"] == "parent_value"
- assert "backupId" in jsonified_request
- assert jsonified_request["backupId"] == "backup_id_value"
- assert "sourceBackup" in jsonified_request
- assert jsonified_request["sourceBackup"] == "source_backup_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = bigtable_table_admin.ListSnapshotsResponse()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -17235,48 +17802,48 @@ def test_copy_backup_rest_required_fields(
pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "post",
+ "method": "get",
"query_params": pb_request,
}
- transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.copy_backup(request)
+ response = client.list_snapshots(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_copy_backup_rest_unset_required_fields():
+def test_list_snapshots_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.copy_backup._get_unset_required_fields({})
+ unset_fields = transport.list_snapshots._get_unset_required_fields({})
assert set(unset_fields) == (
- set(())
- & set(
+ set(
(
- "parent",
- "backupId",
- "sourceBackup",
- "expireTime",
+ "pageSize",
+ "pageToken",
)
)
+ & set(("parent",))
)
-def test_copy_backup_rest_flattened():
- client = BigtableTableAdminClient(
+def test_list_snapshots_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -17284,7 +17851,7 @@ def test_copy_backup_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = bigtable_table_admin.ListSnapshotsResponse()
# get arguments that satisfy an http rule for this method
sample_request = {
@@ -17294,35 +17861,34 @@ def test_copy_backup_rest_flattened():
# get truthy value for each flattened field
mock_args = dict(
parent="parent_value",
- backup_id="backup_id_value",
- source_backup="source_backup_value",
- expire_time=timestamp_pb2.Timestamp(seconds=751),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.copy_backup(**mock_args)
+ client.list_snapshots(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy"
+ "%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots"
% client.transport._host,
args[1],
)
-def test_copy_backup_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_list_snapshots_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -17330,20 +17896,82 @@ def test_copy_backup_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.copy_backup(
- bigtable_table_admin.CopyBackupRequest(),
+ client.list_snapshots(
+ bigtable_table_admin.ListSnapshotsRequest(),
parent="parent_value",
- backup_id="backup_id_value",
- source_backup="source_backup_value",
- expire_time=timestamp_pb2.Timestamp(seconds=751),
)
-def test_get_iam_policy_rest_use_cached_wrapped_rpc():
+def test_list_snapshots_rest_pager(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ bigtable_table_admin.ListSnapshotsResponse(
+ snapshots=[
+ table.Snapshot(),
+ table.Snapshot(),
+ table.Snapshot(),
+ ],
+ next_page_token="abc",
+ ),
+ bigtable_table_admin.ListSnapshotsResponse(
+ snapshots=[],
+ next_page_token="def",
+ ),
+ bigtable_table_admin.ListSnapshotsResponse(
+ snapshots=[
+ table.Snapshot(),
+ ],
+ next_page_token="ghi",
+ ),
+ bigtable_table_admin.ListSnapshotsResponse(
+ snapshots=[
+ table.Snapshot(),
+ table.Snapshot(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {
+ "parent": "projects/sample1/instances/sample2/clusters/sample3"
+ }
+
+ pager = client.list_snapshots(request=sample_request)
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, table.Snapshot) for i in results)
+
+ pages = list(client.list_snapshots(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_snapshot_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -17353,37 +17981,37 @@ def test_get_iam_policy_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.get_iam_policy in client._transport._wrapped_methods
+ assert client._transport.delete_snapshot in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc
+ client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc
request = {}
- client.get_iam_policy(request)
+ client.delete_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.get_iam_policy(request)
+ client.delete_snapshot(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_get_iam_policy_rest_required_fields(
- request_type=iam_policy_pb2.GetIamPolicyRequest,
+def test_delete_snapshot_rest_required_fields(
+ request_type=bigtable_table_admin.DeleteSnapshotRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
- request_init["resource"] = ""
+ request_init["name"] = ""
request = request_type(**request_init)
- pb_request = request
+ pb_request = request_type.pb(request)
jsonified_request = json.loads(
json_format.MessageToJson(pb_request, use_integers_for_enums=False)
)
@@ -17392,30 +18020,30 @@ def test_get_iam_policy_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).get_iam_policy._get_unset_required_fields(jsonified_request)
+ ).delete_snapshot._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
- jsonified_request["resource"] = "resource_value"
+ jsonified_request["name"] = "name_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).get_iam_policy._get_unset_required_fields(jsonified_request)
+ ).delete_snapshot._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
- assert "resource" in jsonified_request
- assert jsonified_request["resource"] == "resource_value"
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = policy_pb2.Policy()
+ return_value = None
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -17424,42 +18052,40 @@ def test_get_iam_policy_rest_required_fields(
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
- pb_request = request
+ pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "post",
+ "method": "delete",
"query_params": pb_request,
}
- transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
-
- json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = ""
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.get_iam_policy(request)
+ response = client.delete_snapshot(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_get_iam_policy_rest_unset_required_fields():
+def test_delete_snapshot_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.get_iam_policy._get_unset_required_fields({})
- assert set(unset_fields) == (set(()) & set(("resource",)))
+ unset_fields = transport.delete_snapshot._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
-def test_get_iam_policy_rest_flattened():
- client = BigtableTableAdminClient(
+def test_delete_snapshot_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -17467,42 +18093,42 @@ def test_get_iam_policy_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = policy_pb2.Policy()
+ return_value = None
# get arguments that satisfy an http rule for this method
sample_request = {
- "resource": "projects/sample1/instances/sample2/tables/sample3"
+ "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
}
# get truthy value for each flattened field
mock_args = dict(
- resource="resource_value",
+ name="name_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = ""
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.get_iam_policy(**mock_args)
+ client.delete_snapshot(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy"
+ "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}"
% client.transport._host,
args[1],
)
-def test_get_iam_policy_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_delete_snapshot_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -17510,17 +18136,17 @@ def test_get_iam_policy_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.get_iam_policy(
- iam_policy_pb2.GetIamPolicyRequest(),
- resource="resource_value",
+ client.delete_snapshot(
+ bigtable_table_admin.DeleteSnapshotRequest(),
+ name="name_value",
)
-def test_set_iam_policy_rest_use_cached_wrapped_rpc():
+def test_create_backup_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -17530,69 +18156,82 @@ def test_set_iam_policy_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert client._transport.set_iam_policy in client._transport._wrapped_methods
+ assert client._transport.create_backup in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc
+ client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc
request = {}
- client.set_iam_policy(request)
+ client.create_backup(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.set_iam_policy(request)
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.create_backup(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_set_iam_policy_rest_required_fields(
- request_type=iam_policy_pb2.SetIamPolicyRequest,
+def test_create_backup_rest_required_fields(
+ request_type=bigtable_table_admin.CreateBackupRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
- request_init["resource"] = ""
+ request_init["parent"] = ""
+ request_init["backup_id"] = ""
request = request_type(**request_init)
- pb_request = request
+ pb_request = request_type.pb(request)
jsonified_request = json.loads(
json_format.MessageToJson(pb_request, use_integers_for_enums=False)
)
# verify fields with default values are dropped
+ assert "backupId" not in jsonified_request
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).set_iam_policy._get_unset_required_fields(jsonified_request)
+ ).create_backup._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
+ assert "backupId" in jsonified_request
+ assert jsonified_request["backupId"] == request_init["backup_id"]
- jsonified_request["resource"] = "resource_value"
+ jsonified_request["parent"] = "parent_value"
+ jsonified_request["backupId"] = "backup_id_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).set_iam_policy._get_unset_required_fields(jsonified_request)
+ ).create_backup._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("backup_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
- assert "resource" in jsonified_request
- assert jsonified_request["resource"] == "resource_value"
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+ assert "backupId" in jsonified_request
+ assert jsonified_request["backupId"] == "backup_id_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = policy_pb2.Policy()
+ return_value = operations_pb2.Operation(name="operations/spam")
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -17601,7 +18240,7 @@ def test_set_iam_policy_rest_required_fields(
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
- pb_request = request
+ pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
@@ -17612,39 +18251,45 @@ def test_set_iam_policy_rest_required_fields(
response_value = Response()
response_value.status_code = 200
-
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.set_iam_policy(request)
+ response = client.create_backup(request)
- expected_params = [("$alt", "json;enum-encoding=int")]
+ expected_params = [
+ (
+ "backupId",
+ "",
+ ),
+ ("$alt", "json;enum-encoding=int"),
+ ]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_set_iam_policy_rest_unset_required_fields():
+def test_create_backup_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.set_iam_policy._get_unset_required_fields({})
+ unset_fields = transport.create_backup._get_unset_required_fields({})
assert set(unset_fields) == (
- set(())
+ set(("backupId",))
& set(
(
- "resource",
- "policy",
+ "parent",
+ "backupId",
+ "backup",
)
)
)
-def test_set_iam_policy_rest_flattened():
- client = BigtableTableAdminClient(
+def test_create_backup_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -17652,16 +18297,18 @@ def test_set_iam_policy_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = policy_pb2.Policy()
+ return_value = operations_pb2.Operation(name="operations/spam")
# get arguments that satisfy an http rule for this method
sample_request = {
- "resource": "projects/sample1/instances/sample2/tables/sample3"
+ "parent": "projects/sample1/instances/sample2/clusters/sample3"
}
# get truthy value for each flattened field
mock_args = dict(
- resource="resource_value",
+ parent="parent_value",
+ backup_id="backup_id_value",
+ backup=table.Backup(name="name_value"),
)
mock_args.update(sample_request)
@@ -17673,21 +18320,21 @@ def test_set_iam_policy_rest_flattened():
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.set_iam_policy(**mock_args)
+ client.create_backup(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy"
+ "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups"
% client.transport._host,
args[1],
)
-def test_set_iam_policy_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_create_backup_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -17695,17 +18342,19 @@ def test_set_iam_policy_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.set_iam_policy(
- iam_policy_pb2.SetIamPolicyRequest(),
- resource="resource_value",
+ client.create_backup(
+ bigtable_table_admin.CreateBackupRequest(),
+ parent="parent_value",
+ backup_id="backup_id_value",
+ backup=table.Backup(name="name_value"),
)
-def test_test_iam_permissions_rest_use_cached_wrapped_rpc():
+def test_get_backup_rest_use_cached_wrapped_rpc():
# Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
# instead of constructing them on each call
with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -17715,42 +18364,37 @@ def test_test_iam_permissions_rest_use_cached_wrapped_rpc():
wrapper_fn.reset_mock()
# Ensure method has been cached
- assert (
- client._transport.test_iam_permissions in client._transport._wrapped_methods
- )
+ assert client._transport.get_backup in client._transport._wrapped_methods
# Replace cached wrapped function with mock
mock_rpc = mock.Mock()
mock_rpc.return_value.name = (
"foo" # operation_request.operation in compute client(s) expect a string.
)
- client._transport._wrapped_methods[
- client._transport.test_iam_permissions
- ] = mock_rpc
+ client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc
request = {}
- client.test_iam_permissions(request)
+ client.get_backup(request)
# Establish that the underlying gRPC stub method was called.
assert mock_rpc.call_count == 1
- client.test_iam_permissions(request)
+ client.get_backup(request)
# Establish that a new wrapper was not created for this call
assert wrapper_fn.call_count == 0
assert mock_rpc.call_count == 2
-def test_test_iam_permissions_rest_required_fields(
- request_type=iam_policy_pb2.TestIamPermissionsRequest,
+def test_get_backup_rest_required_fields(
+ request_type=bigtable_table_admin.GetBackupRequest,
):
transport_class = transports.BigtableTableAdminRestTransport
request_init = {}
- request_init["resource"] = ""
- request_init["permissions"] = ""
+ request_init["name"] = ""
request = request_type(**request_init)
- pb_request = request
+ pb_request = request_type.pb(request)
jsonified_request = json.loads(
json_format.MessageToJson(pb_request, use_integers_for_enums=False)
)
@@ -17759,33 +18403,30 @@ def test_test_iam_permissions_rest_required_fields(
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).test_iam_permissions._get_unset_required_fields(jsonified_request)
+ ).get_backup._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
- jsonified_request["resource"] = "resource_value"
- jsonified_request["permissions"] = "permissions_value"
+ jsonified_request["name"] = "name_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- ).test_iam_permissions._get_unset_required_fields(jsonified_request)
+ ).get_backup._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
- assert "resource" in jsonified_request
- assert jsonified_request["resource"] == "resource_value"
- assert "permissions" in jsonified_request
- assert jsonified_request["permissions"] == "permissions_value"
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
request = request_type(**request_init)
# Designate an appropriate value for the returned response.
- return_value = iam_policy_pb2.TestIamPermissionsResponse()
+ return_value = table.Backup()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
@@ -17794,50 +18435,43 @@ def test_test_iam_permissions_rest_required_fields(
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
- pb_request = request
+ pb_request = request_type.pb(request)
transcode_result = {
"uri": "v1/sample_method",
- "method": "post",
+ "method": "get",
"query_params": pb_request,
}
- transcode_result["body"] = pb_request
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = table.Backup.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.test_iam_permissions(request)
+ response = client.get_backup(request)
expected_params = [("$alt", "json;enum-encoding=int")]
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
-def test_test_iam_permissions_rest_unset_required_fields():
+def test_get_backup_rest_unset_required_fields():
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
- unset_fields = transport.test_iam_permissions._get_unset_required_fields({})
- assert set(unset_fields) == (
- set(())
- & set(
- (
- "resource",
- "permissions",
- )
- )
- )
+ unset_fields = transport.get_backup._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
-def test_test_iam_permissions_rest_flattened():
- client = BigtableTableAdminClient(
+def test_get_backup_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -17845,43 +18479,44 @@ def test_test_iam_permissions_rest_flattened():
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = iam_policy_pb2.TestIamPermissionsResponse()
+ return_value = table.Backup()
# get arguments that satisfy an http rule for this method
sample_request = {
- "resource": "projects/sample1/instances/sample2/tables/sample3"
+ "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
}
# get truthy value for each flattened field
mock_args = dict(
- resource="resource_value",
- permissions=["permissions_value"],
+ name="name_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = table.Backup.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.test_iam_permissions(**mock_args)
+ client.get_backup(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
- "%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions"
+ "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}"
% client.transport._host,
args[1],
)
-def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"):
- client = BigtableTableAdminClient(
+def test_get_backup_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
@@ -17889,1613 +18524,5307 @@ def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"):
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.test_iam_permissions(
- iam_policy_pb2.TestIamPermissionsRequest(),
- resource="resource_value",
- permissions=["permissions_value"],
+ client.get_backup(
+ bigtable_table_admin.GetBackupRequest(),
+ name="name_value",
)
-def test_credentials_transport_error():
- # It is an error to provide credentials and a transport instance.
- transport = transports.BigtableTableAdminGrpcTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- )
- with pytest.raises(ValueError):
- client = BigtableTableAdminClient(
+def test_update_backup_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
+ transport="rest",
)
- # It is an error to provide a credentials file and a transport instance.
- transport = transports.BigtableTableAdminGrpcTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- )
- with pytest.raises(ValueError):
- client = BigtableTableAdminClient(
- client_options={"credentials_file": "credentials.json"},
- transport=transport,
- )
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
- # It is an error to provide an api_key and a transport instance.
- transport = transports.BigtableTableAdminGrpcTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- )
- options = client_options.ClientOptions()
- options.api_key = "api_key"
- with pytest.raises(ValueError):
- client = BigtableTableAdminClient(
- client_options=options,
- transport=transport,
- )
+ # Ensure method has been cached
+ assert client._transport.update_backup in client._transport._wrapped_methods
- # It is an error to provide an api_key and a credential.
- options = client_options.ClientOptions()
- options.api_key = "api_key"
- with pytest.raises(ValueError):
- client = BigtableTableAdminClient(
- client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
)
+ client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc
- # It is an error to provide scopes and a transport instance.
- transport = transports.BigtableTableAdminGrpcTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- )
- with pytest.raises(ValueError):
- client = BigtableTableAdminClient(
- client_options={"scopes": ["1", "2"]},
- transport=transport,
- )
+ request = {}
+ client.update_backup(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
-def test_transport_instance():
- # A client may be instantiated with a custom transport instance.
- transport = transports.BigtableTableAdminGrpcTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- )
- client = BigtableTableAdminClient(transport=transport)
- assert client.transport is transport
+ client.update_backup(request)
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
-def test_transport_get_channel():
- # A client may be instantiated with a custom transport instance.
- transport = transports.BigtableTableAdminGrpcTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- )
- channel = transport.grpc_channel
- assert channel
- transport = transports.BigtableTableAdminGrpcAsyncIOTransport(
- credentials=ga_credentials.AnonymousCredentials(),
+def test_update_backup_rest_required_fields(
+ request_type=bigtable_table_admin.UpdateBackupRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
+
+ request_init = {}
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
)
- channel = transport.grpc_channel
- assert channel
+ # verify fields with default values are dropped
-@pytest.mark.parametrize(
- "transport_class",
- [
- transports.BigtableTableAdminGrpcTransport,
- transports.BigtableTableAdminGrpcAsyncIOTransport,
- transports.BigtableTableAdminRestTransport,
- ],
-)
-def test_transport_adc(transport_class):
- # Test default credentials are used if not provided.
- with mock.patch.object(google.auth, "default") as adc:
- adc.return_value = (ga_credentials.AnonymousCredentials(), None)
- transport_class()
- adc.assert_called_once()
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_backup._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+ # verify required fields with default values are now present
-def test_transport_kind_grpc():
- transport = BigtableTableAdminClient.get_transport_class("grpc")(
+ unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
- )
- assert transport.kind == "grpc"
+ ).update_backup._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("update_mask",))
+ jsonified_request.update(unset_fields)
+ # verify required fields with non-default values are left alone
-def test_initialize_client_w_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
)
- assert client is not None
+ request = request_type(**request_init)
+ # Designate an appropriate value for the returned response.
+ return_value = table.Backup()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "patch",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_create_table_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
-
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.create_table), "__call__") as call:
- call.return_value = gba_table.Table()
- client.create_table(request=None)
+ response_value = Response()
+ response_value.status_code = 200
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CreateTableRequest()
+ # Convert return value to protobuf type
+ return_value = table.Backup.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
- assert args[0] == request_msg
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.update_backup(request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_create_table_from_snapshot_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.create_table_from_snapshot), "__call__"
- ) as call:
- call.return_value = operations_pb2.Operation(name="operations/op")
- client.create_table_from_snapshot(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest()
+def test_update_backup_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
- assert args[0] == request_msg
+ unset_fields = transport.update_backup._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(("updateMask",))
+ & set(
+ (
+ "backup",
+ "updateMask",
+ )
+ )
+ )
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_list_tables_empty_call_grpc():
- client = BigtableTableAdminClient(
+def test_update_backup_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+ transport="rest",
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
- call.return_value = bigtable_table_admin.ListTablesResponse()
- client.list_tables(request=None)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = table.Backup()
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.ListTablesRequest()
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "backup": {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
+ }
+ }
- assert args[0] == request_msg
+ # get truthy value for each flattened field
+ mock_args = dict(
+ backup=table.Backup(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = table.Backup.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.update_backup(**mock_args)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_get_table_empty_call_grpc():
- client = BigtableTableAdminClient(
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_update_backup_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+ transport=transport,
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.get_table), "__call__") as call:
- call.return_value = table.Table()
- client.get_table(request=None)
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_backup(
+ bigtable_table_admin.UpdateBackupRequest(),
+ backup=table.Backup(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.GetTableRequest()
- assert args[0] == request_msg
+def test_delete_backup_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_update_table_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ # Ensure method has been cached
+ assert client._transport.delete_backup in client._transport._wrapped_methods
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.update_table), "__call__") as call:
- call.return_value = operations_pb2.Operation(name="operations/op")
- client.update_table(request=None)
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.UpdateTableRequest()
+ request = {}
+ client.delete_backup(request)
- assert args[0] == request_msg
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+ client.delete_backup(request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_delete_table_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.delete_table), "__call__") as call:
- call.return_value = None
- client.delete_table(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.DeleteTableRequest()
+def test_delete_backup_rest_required_fields(
+ request_type=bigtable_table_admin.DeleteBackupRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
- assert args[0] == request_msg
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+ # verify fields with default values are dropped
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_undelete_table_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_backup._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.undelete_table), "__call__") as call:
- call.return_value = operations_pb2.Operation(name="operations/op")
- client.undelete_table(request=None)
+ # verify required fields with default values are now present
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.UndeleteTableRequest()
+ jsonified_request["name"] = "name_value"
- assert args[0] == request_msg
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_backup._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_create_authorized_view_empty_call_grpc():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+ transport="rest",
)
+ request = request_type(**request_init)
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.create_authorized_view), "__call__"
- ) as call:
- call.return_value = operations_pb2.Operation(name="operations/op")
- client.create_authorized_view(request=None)
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CreateAuthorizedViewRequest()
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
- assert args[0] == request_msg
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_backup(request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_list_authorized_views_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.list_authorized_views), "__call__"
- ) as call:
- call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse()
- client.list_authorized_views(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.ListAuthorizedViewsRequest()
+def test_delete_backup_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
- assert args[0] == request_msg
+ unset_fields = transport.delete_backup._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_get_authorized_view_empty_call_grpc():
- client = BigtableTableAdminClient(
+def test_delete_backup_rest_flattened():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+ transport="rest",
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.get_authorized_view), "__call__"
- ) as call:
- call.return_value = table.AuthorizedView()
- client.get_authorized_view(request=None)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.GetAuthorizedViewRequest()
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
+ }
- assert args[0] == request_msg
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_backup(**mock_args)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_update_authorized_view_empty_call_grpc():
- client = BigtableTableAdminClient(
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_backup_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+ transport=transport,
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.update_authorized_view), "__call__"
- ) as call:
- call.return_value = operations_pb2.Operation(name="operations/op")
- client.update_authorized_view(request=None)
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_backup(
+ bigtable_table_admin.DeleteBackupRequest(),
+ name="name_value",
+ )
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest()
- assert args[0] == request_msg
+def test_list_backups_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_delete_authorized_view_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ # Ensure method has been cached
+ assert client._transport.list_backups in client._transport._wrapped_methods
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_authorized_view), "__call__"
- ) as call:
- call.return_value = None
- client.delete_authorized_view(request=None)
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest()
+ request = {}
+ client.list_backups(request)
- assert args[0] == request_msg
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
+ client.list_backups(request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_modify_column_families_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.modify_column_families), "__call__"
- ) as call:
- call.return_value = table.Table()
- client.modify_column_families(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest()
+def test_list_backups_rest_required_fields(
+ request_type=bigtable_table_admin.ListBackupsRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
- assert args[0] == request_msg
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+ # verify fields with default values are dropped
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_drop_row_range_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_backups._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call:
- call.return_value = None
- client.drop_row_range(request=None)
+ # verify required fields with default values are now present
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.DropRowRangeRequest()
+ jsonified_request["parent"] = "parent_value"
- assert args[0] == request_msg
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_backups._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "filter",
+ "order_by",
+ "page_size",
+ "page_token",
+ )
+ )
+ jsonified_request.update(unset_fields)
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_generate_consistency_token_empty_call_grpc():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+ transport="rest",
)
+ request = request_type(**request_init)
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.generate_consistency_token), "__call__"
- ) as call:
- call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
- client.generate_consistency_token(request=None)
-
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest()
+ # Designate an appropriate value for the returned response.
+ return_value = bigtable_table_admin.ListBackupsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
- assert args[0] == request_msg
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_check_consistency_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.check_consistency), "__call__"
- ) as call:
- call.return_value = bigtable_table_admin.CheckConsistencyResponse()
- client.check_consistency(request=None)
+ response = client.list_backups(request)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CheckConsistencyRequest()
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
- assert args[0] == request_msg
+def test_list_backups_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_snapshot_table_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+ unset_fields = transport.list_backups._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "filter",
+ "orderBy",
+ "pageSize",
+ "pageToken",
+ )
+ )
+ & set(("parent",))
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call:
- call.return_value = operations_pb2.Operation(name="operations/op")
- client.snapshot_table(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.SnapshotTableRequest()
+def test_list_backups_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
- assert args[0] == request_msg
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = bigtable_table_admin.ListBackupsResponse()
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "parent": "projects/sample1/instances/sample2/clusters/sample3"
+ }
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_get_snapshot_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
- call.return_value = table.Snapshot()
- client.get_snapshot(request=None)
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.GetSnapshotRequest()
+ client.list_backups(**mock_args)
- assert args[0] == request_msg
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups"
+ % client.transport._host,
+ args[1],
+ )
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_list_snapshots_empty_call_grpc():
- client = BigtableTableAdminClient(
+def test_list_backups_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+ transport=transport,
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
- call.return_value = bigtable_table_admin.ListSnapshotsResponse()
- client.list_snapshots(request=None)
-
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.ListSnapshotsRequest()
-
- assert args[0] == request_msg
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_backups(
+ bigtable_table_admin.ListBackupsRequest(),
+ parent="parent_value",
+ )
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_delete_snapshot_empty_call_grpc():
- client = BigtableTableAdminClient(
+def test_list_backups_rest_pager(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+ transport=transport,
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
- call.return_value = None
- client.delete_snapshot(request=None)
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ bigtable_table_admin.ListBackupsResponse(
+ backups=[
+ table.Backup(),
+ table.Backup(),
+ table.Backup(),
+ ],
+ next_page_token="abc",
+ ),
+ bigtable_table_admin.ListBackupsResponse(
+ backups=[],
+ next_page_token="def",
+ ),
+ bigtable_table_admin.ListBackupsResponse(
+ backups=[
+ table.Backup(),
+ ],
+ next_page_token="ghi",
+ ),
+ bigtable_table_admin.ListBackupsResponse(
+ backups=[
+ table.Backup(),
+ table.Backup(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.DeleteSnapshotRequest()
+ # Wrap the values into proper Response objs
+ response = tuple(
+ bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
- assert args[0] == request_msg
+ sample_request = {
+ "parent": "projects/sample1/instances/sample2/clusters/sample3"
+ }
+ pager = client.list_backups(request=sample_request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_create_backup_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, table.Backup) for i in results)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
- call.return_value = operations_pb2.Operation(name="operations/op")
- client.create_backup(request=None)
+ pages = list(client.list_backups(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CreateBackupRequest()
- assert args[0] == request_msg
+def test__restore_table_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_get_backup_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ # Ensure method has been cached
+ assert client._transport.restore_table in client._transport._wrapped_methods
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
- call.return_value = table.Backup()
- client.get_backup(request=None)
-
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.GetBackupRequest()
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc
- assert args[0] == request_msg
+ request = {}
+ client._restore_table(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_update_backup_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
- call.return_value = table.Backup()
- client.update_backup(request=None)
+ client._restore_table(request)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.UpdateBackupRequest()
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
- assert args[0] == request_msg
+def test__restore_table_rest_required_fields(
+ request_type=bigtable_table_admin.RestoreTableRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_delete_backup_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+ request_init = {}
+ request_init["parent"] = ""
+ request_init["table_id"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
- call.return_value = None
- client.delete_backup(request=None)
+ # verify fields with default values are dropped
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.DeleteBackupRequest()
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).restore_table._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
- assert args[0] == request_msg
+ # verify required fields with default values are now present
+ jsonified_request["parent"] = "parent_value"
+ jsonified_request["tableId"] = "table_id_value"
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_list_backups_empty_call_grpc():
- client = BigtableTableAdminClient(
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).restore_table._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+ assert "tableId" in jsonified_request
+ assert jsonified_request["tableId"] == "table_id_value"
+
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+ transport="rest",
)
+ request = request_type(**request_init)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
- call.return_value = bigtable_table_admin.ListBackupsResponse()
- client.list_backups(request=None)
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.ListBackupsRequest()
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
- assert args[0] == request_msg
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client._restore_table(request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_restore_table_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
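
A minimal sketch of the shape of the canned dict fed to the transcode() mock above (the key names come from the test itself; the values here are placeholders): the REST transport reads these four keys back when it builds the outgoing HTTP call.

    # hypothetical canned transcode result, mirroring the mock above
    fake_transcode_result = {
        "uri": "v1/sample_method",   # placeholder path with no bound fields
        "method": "post",
        "query_params": {},          # request fields surface here...
        "body": {},                  # ...or here when the http rule defines a body
    }
    assert set(fake_transcode_result) == {"uri", "method", "query_params", "body"}
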
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.restore_table), "__call__") as call:
- call.return_value = operations_pb2.Operation(name="operations/op")
- client.restore_table(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.RestoreTableRequest()
+def test__restore_table_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
+ )
- assert args[0] == request_msg
+ unset_fields = transport.restore_table._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "tableId",
+ )
+ )
+ )
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_copy_backup_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+def test_copy_backup_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
- call.return_value = operations_pb2.Operation(name="operations/op")
- client.copy_backup(request=None)
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CopyBackupRequest()
+ # Ensure method has been cached
+ assert client._transport.copy_backup in client._transport._wrapped_methods
- assert args[0] == request_msg
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc
+ request = {}
+ client.copy_backup(request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_get_iam_policy_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ # Establish that the underlying wrapped method was called.
+ assert mock_rpc.call_count == 1
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
- call.return_value = policy_pb2.Policy()
- client.get_iam_policy(request=None)
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = iam_policy_pb2.GetIamPolicyRequest()
+ client.copy_backup(request)
- assert args[0] == request_msg
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
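
A minimal, self-contained sketch of the caching behaviour the *_use_cached_wrapped_rpc tests rely on (FakeTransport and its attributes are hypothetical stand-ins, not the generated transport): the transport keeps one wrapped callable per bound RPC in _wrapped_methods, so swapping that dict entry is enough to intercept every subsequent client call.

    from unittest import mock

    class FakeTransport:
        def __init__(self):
            self.copy_backup = object()  # stands in for the bound RPC method
            # one wrapped callable per RPC, built once and then reused
            self._wrapped_methods = {self.copy_backup: lambda request, **kw: "real"}

    transport = FakeTransport()
    stub = mock.Mock(return_value="stubbed")
    transport._wrapped_methods[transport.copy_backup] = stub  # swap in the mock
    assert transport._wrapped_methods[transport.copy_backup]({}) == "stubbed"
    assert stub.call_count == 1
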
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_set_iam_policy_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
+def test_copy_backup_rest_required_fields(
+ request_type=bigtable_table_admin.CopyBackupRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request_init["backup_id"] = ""
+ request_init["source_backup"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
- call.return_value = policy_pb2.Policy()
- client.set_iam_policy(request=None)
+ # verify fields with default values are dropped
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = iam_policy_pb2.SetIamPolicyRequest()
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).copy_backup._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
- assert args[0] == request_msg
+ # verify required fields with default values are now present
+ jsonified_request["parent"] = "parent_value"
+ jsonified_request["backupId"] = "backup_id_value"
+ jsonified_request["sourceBackup"] = "source_backup_value"
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_test_iam_permissions_empty_call_grpc():
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport="grpc",
- )
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).copy_backup._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.test_iam_permissions), "__call__"
- ) as call:
- call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
- client.test_iam_permissions(request=None)
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+ assert "backupId" in jsonified_request
+ assert jsonified_request["backupId"] == "backup_id_value"
+ assert "sourceBackup" in jsonified_request
+ assert jsonified_request["sourceBackup"] == "source_backup_value"
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = iam_policy_pb2.TestIamPermissionsRequest()
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
- assert args[0] == request_msg
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
-def test_transport_kind_grpc_asyncio():
- transport = BigtableTableAdminAsyncClient.get_transport_class("grpc_asyncio")(
- credentials=async_anonymous_credentials()
- )
- assert transport.kind == "grpc_asyncio"
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.copy_backup(request)
-def test_initialize_client_w_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(), transport="grpc_asyncio"
- )
- assert client is not None
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
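
The "verify fields with default values are dropped" step works because proto3 JSON serialization omits fields that still hold their default value, so the required keys vanish from the serialized request until the test reinstates them. A small sketch, assuming the same bigtable_table_admin import the test module already uses:

    import json
    from google.protobuf import json_format
    from google.cloud.bigtable_admin_v2.types import bigtable_table_admin

    req = bigtable_table_admin.CopyBackupRequest(parent="", backup_id="")
    # empty-string (default) fields disappear from the JSON form
    assert json.loads(
        json_format.MessageToJson(bigtable_table_admin.CopyBackupRequest.pb(req))
    ) == {}
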
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_create_table_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+def test_copy_backup_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.create_table), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- gba_table.Table(
- name="name_value",
- granularity=gba_table.Table.TimestampGranularity.MILLIS,
- deletion_protection=True,
+ unset_fields = transport.copy_backup._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "backupId",
+ "sourceBackup",
+ "expireTime",
)
)
- await client.create_table(request=None)
+ )
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CreateTableRequest()
- assert args[0] == request_msg
+def test_copy_backup_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_create_table_from_snapshot_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "parent": "projects/sample1/instances/sample2/clusters/sample3"
+ }
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.create_table_from_snapshot), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name="operations/spam")
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ expire_time=timestamp_pb2.Timestamp(seconds=751),
)
- await client.create_table_from_snapshot(request=None)
+ mock_args.update(sample_request)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest()
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- assert args[0] == request_msg
+ client.copy_backup(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy"
+ % client.transport._host,
+ args[1],
+ )
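
path_template.validate(), used in the assertion above, returns True when a concrete URL matches an http-rule template. A quick illustration with a made-up host:

    from google.api_core import path_template

    assert path_template.validate(
        "https://example.com/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy",
        "https://example.com/v2/projects/p1/instances/i1/clusters/c1/backups:copy",
    )
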
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_list_tables_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+def test_copy_backup_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- bigtable_table_admin.ListTablesResponse(
- next_page_token="next_page_token_value",
- )
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.copy_backup(
+ bigtable_table_admin.CopyBackupRequest(),
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ expire_time=timestamp_pb2.Timestamp(seconds=751),
)
- await client.list_tables(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.ListTablesRequest()
- assert args[0] == request_msg
+def test_get_iam_policy_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_get_table_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ # Ensure method has been cached
+ assert client._transport.get_iam_policy in client._transport._wrapped_methods
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.get_table), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- table.Table(
- name="name_value",
- granularity=table.Table.TimestampGranularity.MILLIS,
- deletion_protection=True,
- )
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
)
- await client.get_table(request=None)
+ client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.GetTableRequest()
+ request = {}
+ client.get_iam_policy(request)
- assert args[0] == request_msg
+ # Establish that the underlying wrapped method was called.
+ assert mock_rpc.call_count == 1
+ client.get_iam_policy(request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_update_table_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.update_table), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name="operations/spam")
- )
- await client.update_table(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.UpdateTableRequest()
+def test_get_iam_policy_rest_required_fields(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
- assert args[0] == request_msg
+ request_init = {}
+ request_init["resource"] = ""
+ request = request_type(**request_init)
+ pb_request = request
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+ # verify fields with default values are dropped
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_delete_table_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_iam_policy._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.delete_table), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
- await client.delete_table(request=None)
+ # verify required fields with default values are now present
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.DeleteTableRequest()
+ jsonified_request["resource"] = "resource_value"
- assert args[0] == request_msg
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_iam_policy._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+ # verify required fields with non-default values are left alone
+ assert "resource" in jsonified_request
+ assert jsonified_request["resource"] == "resource_value"
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_undelete_table_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
)
+ request = request_type(**request_init)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.undelete_table), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name="operations/spam")
- )
- await client.undelete_table(request=None)
-
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.UndeleteTableRequest()
-
- assert args[0] == request_msg
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+ response_value = Response()
+ response_value.status_code = 200
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_create_authorized_view_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ json_return_value = json_format.MessageToJson(return_value)
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.create_authorized_view), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name="operations/spam")
- )
- await client.create_authorized_view(request=None)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CreateAuthorizedViewRequest()
+ response = client.get_iam_policy(request)
- assert args[0] == request_msg
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
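
The response faking used throughout these REST tests is just a proto-to-JSON round trip stuffed into a requests.Response. A minimal sketch of that idea (the Policy value is arbitrary):

    from requests import Response
    from google.protobuf import json_format
    from google.iam.v1 import policy_pb2

    fake = Response()
    fake.status_code = 200
    fake._content = json_format.MessageToJson(policy_pb2.Policy(version=1)).encode("UTF-8")
    # the client-side parse recovers the original message from the fake body
    assert json_format.Parse(fake.content.decode("UTF-8"), policy_pb2.Policy()).version == 1
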
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_list_authorized_views_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+def test_get_iam_policy_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.list_authorized_views), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- bigtable_table_admin.ListAuthorizedViewsResponse(
- next_page_token="next_page_token_value",
- )
- )
- await client.list_authorized_views(request=None)
+ unset_fields = transport.get_iam_policy._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("resource",)))
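
In the generated assertion above, the right-hand side intersects with the empty set and therefore always collapses to set(); the check reduces to asserting that _get_unset_required_fields({}) reports nothing as unset.

    # equivalent reading of the assertion pattern used by these generated tests
    assert (set(()) & set(("resource",))) == set()
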
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.ListAuthorizedViewsRequest()
- assert args[0] == request_msg
+def test_get_iam_policy_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_get_authorized_view_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "resource": "projects/sample1/instances/sample2/tables/sample3"
+ }
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.get_authorized_view), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- table.AuthorizedView(
- name="name_value",
- etag="etag_value",
- deletion_protection=True,
- )
+ # get truthy value for each flattened field
+ mock_args = dict(
+ resource="resource_value",
)
- await client.get_authorized_view(request=None)
+ mock_args.update(sample_request)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.GetAuthorizedViewRequest()
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- assert args[0] == request_msg
+ client.get_iam_policy(**mock_args)
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy"
+ % client.transport._host,
+ args[1],
+ )
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_update_authorized_view_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+
+def test_get_iam_policy_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.update_authorized_view), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name="operations/spam")
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_iam_policy(
+ iam_policy_pb2.GetIamPolicyRequest(),
+ resource="resource_value",
)
- await client.update_authorized_view(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest()
- assert args[0] == request_msg
+def test_set_iam_policy_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_delete_authorized_view_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ # Ensure method has been cached
+ assert client._transport.set_iam_policy in client._transport._wrapped_methods
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_authorized_view), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
- await client.delete_authorized_view(request=None)
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest()
+ request = {}
+ client.set_iam_policy(request)
- assert args[0] == request_msg
+ # Establish that the underlying wrapped method was called.
+ assert mock_rpc.call_count == 1
+ client.set_iam_policy(request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_modify_column_families_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.modify_column_families), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- table.Table(
- name="name_value",
- granularity=table.Table.TimestampGranularity.MILLIS,
- deletion_protection=True,
- )
- )
- await client.modify_column_families(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest()
+def test_set_iam_policy_rest_required_fields(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
- assert args[0] == request_msg
+ request_init = {}
+ request_init["resource"] = ""
+ request = request_type(**request_init)
+ pb_request = request
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+ # verify fields with default values are dropped
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_drop_row_range_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).set_iam_policy._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
- await client.drop_row_range(request=None)
+ # verify required fields with default values are now present
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.DropRowRangeRequest()
+ jsonified_request["resource"] = "resource_value"
- assert args[0] == request_msg
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).set_iam_policy._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+ # verify required fields with non-default values are left alone
+ assert "resource" in jsonified_request
+ assert jsonified_request["resource"] == "resource_value"
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_generate_consistency_token_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
)
+ request = request_type(**request_init)
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.generate_consistency_token), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- bigtable_table_admin.GenerateConsistencyTokenResponse(
- consistency_token="consistency_token_value",
- )
- )
- await client.generate_consistency_token(request=None)
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest()
+ response_value = Response()
+ response_value.status_code = 200
- assert args[0] == request_msg
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_check_consistency_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+ response = client.set_iam_policy(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_set_iam_policy_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.check_consistency), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- bigtable_table_admin.CheckConsistencyResponse(
- consistent=True,
+ unset_fields = transport.set_iam_policy._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "resource",
+ "policy",
)
)
- await client.check_consistency(request=None)
+ )
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CheckConsistencyRequest()
- assert args[0] == request_msg
+def test_set_iam_policy_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = policy_pb2.Policy()
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_snapshot_table_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "resource": "projects/sample1/instances/sample2/tables/sample3"
+ }
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name="operations/spam")
+ # get truthy value for each flattened field
+ mock_args = dict(
+ resource="resource_value",
)
- await client.snapshot_table(request=None)
+ mock_args.update(sample_request)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.SnapshotTableRequest()
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- assert args[0] == request_msg
+ client.set_iam_policy(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy"
+ % client.transport._host,
+ args[1],
+ )
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_get_snapshot_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+def test_set_iam_policy_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- table.Snapshot(
- name="name_value",
- data_size_bytes=1594,
- state=table.Snapshot.State.READY,
- description="description_value",
- )
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.set_iam_policy(
+ iam_policy_pb2.SetIamPolicyRequest(),
+ resource="resource_value",
)
- await client.get_snapshot(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.GetSnapshotRequest()
- assert args[0] == request_msg
+def test_test_iam_permissions_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_list_snapshots_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ # Ensure method has been cached
+ assert (
+ client._transport.test_iam_permissions in client._transport._wrapped_methods
+ )
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- bigtable_table_admin.ListSnapshotsResponse(
- next_page_token="next_page_token_value",
- )
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
)
- await client.list_snapshots(request=None)
+ client._transport._wrapped_methods[
+ client._transport.test_iam_permissions
+ ] = mock_rpc
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.ListSnapshotsRequest()
+ request = {}
+ client.test_iam_permissions(request)
- assert args[0] == request_msg
+ # Establish that the underlying wrapped method was called.
+ assert mock_rpc.call_count == 1
+ client.test_iam_permissions(request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_delete_snapshot_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
- await client.delete_snapshot(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.DeleteSnapshotRequest()
+def test_test_iam_permissions_rest_required_fields(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
- assert args[0] == request_msg
+ request_init = {}
+ request_init["resource"] = ""
+ request_init["permissions"] = ""
+ request = request_type(**request_init)
+ pb_request = request
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+ # verify fields with default values are dropped
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_create_backup_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).test_iam_permissions._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name="operations/spam")
- )
- await client.create_backup(request=None)
+ # verify required fields with default values are now present
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CreateBackupRequest()
+ jsonified_request["resource"] = "resource_value"
+ jsonified_request["permissions"] = "permissions_value"
- assert args[0] == request_msg
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).test_iam_permissions._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+ # verify required fields with non-default values are left alone
+ assert "resource" in jsonified_request
+ assert jsonified_request["resource"] == "resource_value"
+ assert "permissions" in jsonified_request
+ assert jsonified_request["permissions"] == "permissions_value"
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_get_backup_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
)
+ request = request_type(**request_init)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- table.Backup(
- name="name_value",
- source_table="source_table_value",
- source_backup="source_backup_value",
- size_bytes=1089,
- state=table.Backup.State.CREATING,
- backup_type=table.Backup.BackupType.STANDARD,
- )
- )
- await client.get_backup(request=None)
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.GetBackupRequest()
+ response_value = Response()
+ response_value.status_code = 200
- assert args[0] == request_msg
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_update_backup_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+ response = client.test_iam_permissions(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_test_iam_permissions_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- table.Backup(
- name="name_value",
- source_table="source_table_value",
- source_backup="source_backup_value",
- size_bytes=1089,
- state=table.Backup.State.CREATING,
- backup_type=table.Backup.BackupType.STANDARD,
+ unset_fields = transport.test_iam_permissions._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "resource",
+ "permissions",
)
)
- await client.update_backup(request=None)
+ )
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.UpdateBackupRequest()
- assert args[0] == request_msg
+def test_test_iam_permissions_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = iam_policy_pb2.TestIamPermissionsResponse()
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_delete_backup_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "resource": "projects/sample1/instances/sample2/tables/sample3"
+ }
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
- await client.delete_backup(request=None)
+ # get truthy value for each flattened field
+ mock_args = dict(
+ resource="resource_value",
+ permissions=["permissions_value"],
+ )
+ mock_args.update(sample_request)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.DeleteBackupRequest()
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- assert args[0] == request_msg
+ client.test_iam_permissions(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions"
+ % client.transport._host,
+ args[1],
+ )
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_list_backups_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- bigtable_table_admin.ListBackupsResponse(
- next_page_token="next_page_token_value",
- )
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.test_iam_permissions(
+ iam_policy_pb2.TestIamPermissionsRequest(),
+ resource="resource_value",
+ permissions=["permissions_value"],
)
- await client.list_backups(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.ListBackupsRequest()
- assert args[0] == request_msg
+def test_create_schema_bundle_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_restore_table_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
- )
+ # Ensure method has been cached
+ assert (
+ client._transport.create_schema_bundle in client._transport._wrapped_methods
+ )
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.restore_table), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name="operations/spam")
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
)
- await client.restore_table(request=None)
+ client._transport._wrapped_methods[
+ client._transport.create_schema_bundle
+ ] = mock_rpc
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.RestoreTableRequest()
+ request = {}
+ client.create_schema_bundle(request)
- assert args[0] == request_msg
+ # Establish that the underlying wrapped method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+ client.create_schema_bundle(request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_copy_backup_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_create_schema_bundle_rest_required_fields(
+ request_type=bigtable_table_admin.CreateSchemaBundleRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request_init["schema_bundle_id"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name="operations/spam")
- )
- await client.copy_backup(request=None)
+ # verify fields with default values are dropped
+ assert "schemaBundleId" not in jsonified_request
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = bigtable_table_admin.CopyBackupRequest()
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_schema_bundle._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
- assert args[0] == request_msg
+ # verify required fields with default values are now present
+ assert "schemaBundleId" in jsonified_request
+ assert jsonified_request["schemaBundleId"] == request_init["schema_bundle_id"]
+
+ jsonified_request["parent"] = "parent_value"
+ jsonified_request["schemaBundleId"] = "schema_bundle_id_value"
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).create_schema_bundle._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("schema_bundle_id",))
+ jsonified_request.update(unset_fields)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_get_iam_policy_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+ assert "schemaBundleId" in jsonified_request
+ assert jsonified_request["schemaBundleId"] == "schema_bundle_id_value"
+
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
)
+ request = request_type(**request_init)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- policy_pb2.Policy(
- version=774,
- etag=b"etag_blob",
- )
- )
- await client.get_iam_policy(request=None)
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = iam_policy_pb2.GetIamPolicyRequest()
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
- assert args[0] == request_msg
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_schema_bundle(request)
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_set_iam_policy_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+ expected_params = [
+ (
+ "schemaBundleId",
+ "",
+ ),
+ ("$alt", "json;enum-encoding=int"),
+ ]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_create_schema_bundle_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials()
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- policy_pb2.Policy(
- version=774,
- etag=b"etag_blob",
+ unset_fields = transport.create_schema_bundle._get_unset_required_fields({})
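+    # Of the method's required fields, only "schemaBundleId" (the intersection of the
+    # two sets below) is expected to be reported as unset for an empty request.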
+ assert set(unset_fields) == (
+ set(("schemaBundleId",))
+ & set(
+ (
+ "parent",
+ "schemaBundleId",
+ "schemaBundle",
)
)
- await client.set_iam_policy(request=None)
+ )
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = iam_policy_pb2.SetIamPolicyRequest()
- assert args[0] == request_msg
+def test_create_schema_bundle_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-@pytest.mark.asyncio
-async def test_test_iam_permissions_empty_call_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
- credentials=async_anonymous_credentials(),
- transport="grpc_asyncio",
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ schema_bundle_id="schema_bundle_id_value",
+ schema_bundle=table.SchemaBundle(name="name_value"),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.create_schema_bundle(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_create_schema_bundle_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
)
- # Mock the actual call, and fake the request.
- with mock.patch.object(
- type(client.transport.test_iam_permissions), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- iam_policy_pb2.TestIamPermissionsResponse(
- permissions=["permissions_value"],
- )
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_schema_bundle(
+ bigtable_table_admin.CreateSchemaBundleRequest(),
+ parent="parent_value",
+ schema_bundle_id="schema_bundle_id_value",
+ schema_bundle=table.SchemaBundle(name="name_value"),
)
- await client.test_iam_permissions(request=None)
- # Establish that the underlying stub method was called.
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- request_msg = iam_policy_pb2.TestIamPermissionsRequest()
- assert args[0] == request_msg
+def test_update_schema_bundle_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.update_schema_bundle in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.update_schema_bundle
+ ] = mock_rpc
+
+ request = {}
+ client.update_schema_bundle(request)
+
+        # Establish that the underlying wrapped method was called.
+ assert mock_rpc.call_count == 1
+
+ # Operation methods build a cached wrapper on first rpc call
+ # subsequent calls should use the cached wrapper
+ wrapper_fn.reset_mock()
+
+ client.update_schema_bundle(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_update_schema_bundle_rest_required_fields(
+ request_type=bigtable_table_admin.UpdateSchemaBundleRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
+
+ request_init = {}
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_schema_bundle._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).update_schema_bundle._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "ignore_warnings",
+ "update_mask",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "patch",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.update_schema_bundle(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_update_schema_bundle_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ unset_fields = transport.update_schema_bundle._get_unset_required_fields({})
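+    # The intersection below is empty, so no fields are expected to be reported as
+    # unset for an empty request.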
+ assert set(unset_fields) == (
+ set(
+ (
+ "ignoreWarnings",
+ "updateMask",
+ )
+ )
+ & set(("schemaBundle",))
+ )
+
+
+def test_update_schema_bundle_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "schema_bundle": {
+ "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4"
+ }
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ schema_bundle=table.SchemaBundle(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.update_schema_bundle(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{schema_bundle.name=projects/*/instances/*/tables/*/schemaBundles/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_update_schema_bundle_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_schema_bundle(
+ bigtable_table_admin.UpdateSchemaBundleRequest(),
+ schema_bundle=table.SchemaBundle(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+def test_get_schema_bundle_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.get_schema_bundle in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.get_schema_bundle
+ ] = mock_rpc
+
+ request = {}
+ client.get_schema_bundle(request)
+
+        # Establish that the underlying wrapped method was called.
+ assert mock_rpc.call_count == 1
+
+ client.get_schema_bundle(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_get_schema_bundle_rest_required_fields(
+ request_type=bigtable_table_admin.GetSchemaBundleRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_schema_bundle._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).get_schema_bundle._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = table.SchemaBundle()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = table.SchemaBundle.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.get_schema_bundle(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_get_schema_bundle_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ unset_fields = transport.get_schema_bundle._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+def test_get_schema_bundle_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = table.SchemaBundle()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = table.SchemaBundle.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.get_schema_bundle(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_get_schema_bundle_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_schema_bundle(
+ bigtable_table_admin.GetSchemaBundleRequest(),
+ name="name_value",
+ )
+
+
+def test_list_schema_bundles_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.list_schema_bundles in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.list_schema_bundles
+ ] = mock_rpc
+
+ request = {}
+ client.list_schema_bundles(request)
+
+        # Establish that the underlying wrapped method was called.
+ assert mock_rpc.call_count == 1
+
+ client.list_schema_bundles(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_list_schema_bundles_rest_required_fields(
+ request_type=bigtable_table_admin.ListSchemaBundlesRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_schema_bundles._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["parent"] = "parent_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).list_schema_bundles._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(
+ (
+ "page_size",
+ "page_token",
+ )
+ )
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = bigtable_table_admin.ListSchemaBundlesResponse()
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "get",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb(
+ return_value
+ )
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.list_schema_bundles(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_list_schema_bundles_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ unset_fields = transport.list_schema_bundles._get_unset_required_fields({})
+ assert set(unset_fields) == (
+ set(
+ (
+ "pageSize",
+ "pageToken",
+ )
+ )
+ & set(("parent",))
+ )
+
+
+def test_list_schema_bundles_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = bigtable_table_admin.ListSchemaBundlesResponse()
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.list_schema_bundles(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_list_schema_bundles_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_schema_bundles(
+ bigtable_table_admin.ListSchemaBundlesRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_schema_bundles_rest_pager(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode:
+ # Set the response as a series of pages
+ response = (
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ ],
+ next_page_token="abc",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[],
+ next_page_token="def",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ ],
+ next_page_token="ghi",
+ ),
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ schema_bundles=[
+ table.SchemaBundle(),
+ table.SchemaBundle(),
+ ],
+ ),
+ )
+ # Two responses for two calls
+ response = response + response
+
+ # Wrap the values into proper Response objs
+ response = tuple(
+ bigtable_table_admin.ListSchemaBundlesResponse.to_json(x) for x in response
+ )
+ return_values = tuple(Response() for i in response)
+ for return_val, response_val in zip(return_values, response):
+ return_val._content = response_val.encode("UTF-8")
+ return_val.status_code = 200
+ req.side_effect = return_values
+
+ sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
+
+ pager = client.list_schema_bundles(request=sample_request)
+
+ results = list(pager)
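+        # 3 + 0 + 1 + 2 schema bundles across the four pages mocked above.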
+ assert len(results) == 6
+ assert all(isinstance(i, table.SchemaBundle) for i in results)
+
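+        # The second copy of the mocked responses (via req.side_effect) backs this
+        # second listing, which is consumed page by page.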
+ pages = list(client.list_schema_bundles(request=sample_request).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_schema_bundle_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert (
+ client._transport.delete_schema_bundle in client._transport._wrapped_methods
+ )
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ )
+ client._transport._wrapped_methods[
+ client._transport.delete_schema_bundle
+ ] = mock_rpc
+
+ request = {}
+ client.delete_schema_bundle(request)
+
+        # Establish that the underlying wrapped method was called.
+ assert mock_rpc.call_count == 1
+
+ client.delete_schema_bundle(request)
+
+ # Establish that a new wrapper was not created for this call
+ assert wrapper_fn.call_count == 0
+ assert mock_rpc.call_count == 2
+
+
+def test_delete_schema_bundle_rest_required_fields(
+ request_type=bigtable_table_admin.DeleteSchemaBundleRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
+
+ request_init = {}
+ request_init["name"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_schema_bundle._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ jsonified_request["name"] = "name_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).delete_schema_bundle._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("etag",))
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "name" in jsonified_request
+ assert jsonified_request["name"] == "name_value"
+
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = None
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "delete",
+ "query_params": pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ response = client.delete_schema_bundle(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_delete_schema_bundle_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ unset_fields = transport.delete_schema_bundle._get_unset_required_fields({})
+ assert set(unset_fields) == (set(("etag",)) & set(("name",)))
+
+
+def test_delete_schema_bundle_rest_flattened():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ name="name_value",
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ client.delete_schema_bundle(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_delete_schema_bundle_rest_flattened_error(transport: str = "rest"):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_schema_bundle(
+ bigtable_table_admin.DeleteSchemaBundleRequest(),
+ name="name_value",
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.BigtableTableAdminGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.BigtableTableAdminGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = BaseBigtableTableAdminClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.BigtableTableAdminGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = BaseBigtableTableAdminClient(
+ client_options=options,
+ transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = BaseBigtableTableAdminClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.BigtableTableAdminGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = BaseBigtableTableAdminClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.BigtableTableAdminGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = BaseBigtableTableAdminClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.BigtableTableAdminGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.BigtableTableAdminGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.BigtableTableAdminGrpcTransport,
+ transports.BigtableTableAdminGrpcAsyncIOTransport,
+ transports.BigtableTableAdminRestTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_kind_grpc():
+ transport = BaseBigtableTableAdminClient.get_transport_class("grpc")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "grpc"
+
+
+def test_initialize_client_w_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_table_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_table), "__call__") as call:
+ call.return_value = gba_table.Table()
+ client.create_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CreateTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_table_from_snapshot_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_table_from_snapshot), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_table_from_snapshot(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_tables_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
+ call.return_value = bigtable_table_admin.ListTablesResponse()
+ client.list_tables(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ListTablesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_table_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_table), "__call__") as call:
+ call.return_value = table.Table()
+ client.get_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GetTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_table_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.update_table), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.UpdateTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_table_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_table), "__call__") as call:
+ call.return_value = None
+ client.delete_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DeleteTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_undelete_table_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.undelete_table), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.undelete_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.UndeleteTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_authorized_view_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_authorized_view), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_authorized_view(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CreateAuthorizedViewRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_authorized_views_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_authorized_views), "__call__"
+ ) as call:
+ call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse()
+ client.list_authorized_views(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ListAuthorizedViewsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_authorized_view_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_authorized_view), "__call__"
+ ) as call:
+ call.return_value = table.AuthorizedView()
+ client.get_authorized_view(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GetAuthorizedViewRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_authorized_view_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_authorized_view), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_authorized_view(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_authorized_view_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_authorized_view), "__call__"
+ ) as call:
+ call.return_value = None
+ client.delete_authorized_view(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_modify_column_families_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.modify_column_families), "__call__"
+ ) as call:
+ call.return_value = table.Table()
+ client.modify_column_families(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_drop_row_range_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call:
+ call.return_value = None
+ client.drop_row_range(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DropRowRangeRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_generate_consistency_token_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.generate_consistency_token), "__call__"
+ ) as call:
+ call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
+ client.generate_consistency_token(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_check_consistency_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.check_consistency), "__call__"
+ ) as call:
+ call.return_value = bigtable_table_admin.CheckConsistencyResponse()
+ client.check_consistency(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CheckConsistencyRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_snapshot_table_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.snapshot_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.SnapshotTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_snapshot_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
+ call.return_value = table.Snapshot()
+ client.get_snapshot(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GetSnapshotRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_snapshots_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
+ call.return_value = bigtable_table_admin.ListSnapshotsResponse()
+ client.list_snapshots(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ListSnapshotsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_snapshot_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
+ call.return_value = None
+ client.delete_snapshot(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DeleteSnapshotRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_backup_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_backup(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CreateBackupRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_backup_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
+ call.return_value = table.Backup()
+ client.get_backup(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GetBackupRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_backup_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
+ call.return_value = table.Backup()
+ client.update_backup(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.UpdateBackupRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_backup_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
+ call.return_value = None
+ client.delete_backup(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DeleteBackupRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_backups_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+ call.return_value = bigtable_table_admin.ListBackupsResponse()
+ client.list_backups(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ListBackupsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test__restore_table_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.restore_table), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client._restore_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.RestoreTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_copy_backup_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.copy_backup(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CopyBackupRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_iam_policy_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+ client.get_iam_policy(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = iam_policy_pb2.GetIamPolicyRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_set_iam_policy_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ call.return_value = policy_pb2.Policy()
+ client.set_iam_policy(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = iam_policy_pb2.SetIamPolicyRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_test_iam_permissions_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+ client.test_iam_permissions(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = iam_policy_pb2.TestIamPermissionsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_schema_bundle_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CreateSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_schema_bundle_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.update_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.UpdateSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_schema_bundle_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = table.SchemaBundle()
+ client.get_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GetSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_schema_bundles_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ call.return_value = bigtable_table_admin.ListSchemaBundlesResponse()
+ client.list_schema_bundles(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ListSchemaBundlesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_schema_bundle_empty_call_grpc():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_schema_bundle), "__call__"
+ ) as call:
+ call.return_value = None
+ client.delete_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DeleteSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_grpc_asyncio():
+ transport = BaseBigtableTableAdminAsyncClient.get_transport_class("grpc_asyncio")(
+ credentials=async_anonymous_credentials()
+ )
+ assert transport.kind == "grpc_asyncio"
+
+
+def test_initialize_client_w_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(), transport="grpc_asyncio"
+ )
+ assert client is not None
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_table_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_table), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gba_table.Table(
+ name="name_value",
+ granularity=gba_table.Table.TimestampGranularity.MILLIS,
+ deletion_protection=True,
+ )
+ )
+ await client.create_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CreateTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_table_from_snapshot_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_table_from_snapshot), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.create_table_from_snapshot(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_tables_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_table_admin.ListTablesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_tables(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ListTablesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_table_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_table), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ table.Table(
+ name="name_value",
+ granularity=table.Table.TimestampGranularity.MILLIS,
+ deletion_protection=True,
+ )
+ )
+ await client.get_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GetTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_table_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.update_table), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.update_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.UpdateTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_table_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_table), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DeleteTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_undelete_table_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.undelete_table), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.undelete_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.UndeleteTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_authorized_view_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_authorized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.create_authorized_view(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CreateAuthorizedViewRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_authorized_views_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_authorized_views), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_table_admin.ListAuthorizedViewsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_authorized_views(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ListAuthorizedViewsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_authorized_view_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_authorized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ table.AuthorizedView(
+ name="name_value",
+ etag="etag_value",
+ deletion_protection=True,
+ )
+ )
+ await client.get_authorized_view(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GetAuthorizedViewRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_authorized_view_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_authorized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.update_authorized_view(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_authorized_view_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_authorized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_authorized_view(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_modify_column_families_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.modify_column_families), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ table.Table(
+ name="name_value",
+ granularity=table.Table.TimestampGranularity.MILLIS,
+ deletion_protection=True,
+ )
+ )
+ await client.modify_column_families(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_drop_row_range_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.drop_row_range(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DropRowRangeRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_generate_consistency_token_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.generate_consistency_token), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_table_admin.GenerateConsistencyTokenResponse(
+ consistency_token="consistency_token_value",
+ )
+ )
+ await client.generate_consistency_token(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_check_consistency_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.check_consistency), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_table_admin.CheckConsistencyResponse(
+ consistent=True,
+ )
+ )
+ await client.check_consistency(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CheckConsistencyRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_snapshot_table_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.snapshot_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.SnapshotTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_snapshot_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ table.Snapshot(
+ name="name_value",
+ data_size_bytes=1594,
+ state=table.Snapshot.State.READY,
+ description="description_value",
+ )
+ )
+ await client.get_snapshot(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GetSnapshotRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_snapshots_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_table_admin.ListSnapshotsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_snapshots(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ListSnapshotsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_snapshot_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_snapshot(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DeleteSnapshotRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_backup_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.create_backup(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CreateBackupRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_backup_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ table.Backup(
+ name="name_value",
+ source_table="source_table_value",
+ source_backup="source_backup_value",
+ size_bytes=1089,
+ state=table.Backup.State.CREATING,
+ backup_type=table.Backup.BackupType.STANDARD,
+ )
+ )
+ await client.get_backup(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GetBackupRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_backup_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ table.Backup(
+ name="name_value",
+ source_table="source_table_value",
+ source_backup="source_backup_value",
+ size_bytes=1089,
+ state=table.Backup.State.CREATING,
+ backup_type=table.Backup.BackupType.STANDARD,
+ )
+ )
+ await client.update_backup(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.UpdateBackupRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_backup_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_backup(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DeleteBackupRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_backups_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_table_admin.ListBackupsResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_backups(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ListBackupsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test__restore_table_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.restore_table), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client._restore_table(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.RestoreTableRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_copy_backup_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.copy_backup(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CopyBackupRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_iam_policy_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ await client.get_iam_policy(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = iam_policy_pb2.GetIamPolicyRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_set_iam_policy_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
+ )
+ await client.set_iam_policy(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = iam_policy_pb2.SetIamPolicyRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_test_iam_permissions_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
+ )
+ )
+ await client.test_iam_permissions(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = iam_policy_pb2.TestIamPermissionsRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_create_schema_bundle_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.create_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CreateSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_update_schema_bundle_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ await client.update_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.UpdateSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_get_schema_bundle_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ table.SchemaBundle(
+ name="name_value",
+ etag="etag_value",
+ )
+ )
+ await client.get_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GetSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_list_schema_bundles_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_table_admin.ListSchemaBundlesResponse(
+ next_page_token="next_page_token_value",
+ )
+ )
+ await client.list_schema_bundles(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ListSchemaBundlesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_schema_bundle_empty_call_grpc_asyncio():
+ client = BaseBigtableTableAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_schema_bundle), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DeleteSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+ transport = BaseBigtableTableAdminClient.get_transport_class("rest")(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ assert transport.kind == "rest"
+
+
+def test_create_table_rest_bad_request(
+ request_type=bigtable_table_admin.CreateTableRequest,
+):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/instances/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_table(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.CreateTableRequest,
+ dict,
+ ],
+)
+def test_create_table_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/instances/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = gba_table.Table(
+ name="name_value",
+ granularity=gba_table.Table.TimestampGranularity.MILLIS,
+ deletion_protection=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = gba_table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_table(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gba_table.Table)
+ assert response.name == "name_value"
+ assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS
+ assert response.deletion_protection is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_table_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BaseBigtableTableAdminClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_create_table"
+ ) as post, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_create_table_with_metadata"
+ ) as post_with_metadata, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "pre_create_table"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ post_with_metadata.assert_not_called()
+ pb_message = bigtable_table_admin.CreateTableRequest.pb(
+ bigtable_table_admin.CreateTableRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = gba_table.Table.to_json(gba_table.Table())
+ req.return_value.content = return_value
+
+ request = bigtable_table_admin.CreateTableRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = gba_table.Table()
+ post_with_metadata.return_value = gba_table.Table(), metadata
+
+ client.create_table(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+ post_with_metadata.assert_called_once()
+
+
+def test_create_table_from_snapshot_rest_bad_request(
+ request_type=bigtable_table_admin.CreateTableFromSnapshotRequest,
+):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/instances/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.create_table_from_snapshot(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.CreateTableFromSnapshotRequest,
+ dict,
+ ],
+)
+def test_create_table_from_snapshot_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/instances/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.create_table_from_snapshot(request)
+
+    # The call returns a long-running operation; just confirm the mocked
+    # Operation proto round-trips through JSON (no field-level assertions here).
+    json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_table_from_snapshot_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BaseBigtableTableAdminClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot"
+ ) as post, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor,
+ "post_create_table_from_snapshot_with_metadata",
+ ) as post_with_metadata, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ post_with_metadata.assert_not_called()
+ pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(
+ bigtable_table_admin.CreateTableFromSnapshotRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = bigtable_table_admin.CreateTableFromSnapshotRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+ post_with_metadata.return_value = operations_pb2.Operation(), metadata
+
+ client.create_table_from_snapshot(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+ post_with_metadata.assert_called_once()
+
+
+def test_list_tables_rest_bad_request(
+ request_type=bigtable_table_admin.ListTablesRequest,
+):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/instances/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.list_tables(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.ListTablesRequest,
+ dict,
+ ],
+)
+def test_list_tables_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/instances/sample2"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = bigtable_table_admin.ListTablesResponse(
+ next_page_token="next_page_token_value",
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.list_tables(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTablesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_tables_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BaseBigtableTableAdminClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_list_tables"
+ ) as post, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_list_tables_with_metadata"
+ ) as post_with_metadata, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "pre_list_tables"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ post_with_metadata.assert_not_called()
+ pb_message = bigtable_table_admin.ListTablesRequest.pb(
+ bigtable_table_admin.ListTablesRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = bigtable_table_admin.ListTablesResponse.to_json(
+ bigtable_table_admin.ListTablesResponse()
+ )
+ req.return_value.content = return_value
+
+ request = bigtable_table_admin.ListTablesRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = bigtable_table_admin.ListTablesResponse()
+ post_with_metadata.return_value = (
+ bigtable_table_admin.ListTablesResponse(),
+ metadata,
+ )
+
+ client.list_tables(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+ post_with_metadata.assert_called_once()
+
+
+def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRequest):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.get_table(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.GetTableRequest,
+ dict,
+ ],
+)
+def test_get_table_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = table.Table(
+ name="name_value",
+ granularity=table.Table.TimestampGranularity.MILLIS,
+ deletion_protection=True,
+ )
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.get_table(request)
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, table.Table)
+ assert response.name == "name_value"
+ assert response.granularity == table.Table.TimestampGranularity.MILLIS
+ assert response.deletion_protection is True
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_table_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BaseBigtableTableAdminClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_get_table"
+ ) as post, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_get_table_with_metadata"
+ ) as post_with_metadata, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "pre_get_table"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ post_with_metadata.assert_not_called()
+ pb_message = bigtable_table_admin.GetTableRequest.pb(
+ bigtable_table_admin.GetTableRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = table.Table.to_json(table.Table())
+ req.return_value.content = return_value
+
+ request = bigtable_table_admin.GetTableRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = table.Table()
+ post_with_metadata.return_value = table.Table(), metadata
+
+ client.get_table(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+ post_with_metadata.assert_called_once()
+
+
+def test_update_table_rest_bad_request(
+ request_type=bigtable_table_admin.UpdateTableRequest,
+):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {
+ "table": {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ }
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.update_table(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.UpdateTableRequest,
+ dict,
+ ],
+)
+def test_update_table_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {
+ "table": {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ }
+ request_init["table"] = {
+ "name": "projects/sample1/instances/sample2/tables/sample3",
+ "cluster_states": {},
+ "column_families": {},
+ "granularity": 1,
+ "restore_info": {
+ "source_type": 1,
+ "backup_info": {
+ "backup": "backup_value",
+ "start_time": {"seconds": 751, "nanos": 543},
+ "end_time": {},
+ "source_table": "source_table_value",
+ "source_backup": "source_backup_value",
+ },
+ },
+ "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}},
+ "deletion_protection": True,
+ "automated_backup_policy": {"retention_period": {}, "frequency": {}},
+ "tiered_storage_config": {"infrequent_access": {"include_if_older_than": {}}},
+ "row_key_schema": {
+ "fields": [
+ {
+ "field_name": "field_name_value",
+ "type_": {
+ "bytes_type": {"encoding": {"raw": {}}},
+ "string_type": {"encoding": {"utf8_raw": {}, "utf8_bytes": {}}},
+ "int64_type": {
+ "encoding": {
+ "big_endian_bytes": {"bytes_type": {}},
+ "ordered_code_bytes": {},
+ }
+ },
+ "float32_type": {},
+ "float64_type": {},
+ "bool_type": {},
+ "timestamp_type": {"encoding": {"unix_micros_int64": {}}},
+ "date_type": {},
+ "aggregate_type": {
+ "input_type": {},
+ "state_type": {},
+ "sum": {},
+ "hllpp_unique_count": {},
+ "max_": {},
+ "min_": {},
+ },
+ "struct_type": {},
+ "array_type": {"element_type": {}},
+ "map_type": {"key_type": {}, "value_type": {}},
+ "proto_type": {
+ "schema_bundle_id": "schema_bundle_id_value",
+ "message_name": "message_name_value",
+ },
+ "enum_type": {
+ "schema_bundle_id": "schema_bundle_id_value",
+ "enum_name": "enum_name_value",
+ },
+ },
+ }
+ ],
+ "encoding": {
+ "singleton": {},
+ "delimited_bytes": {"delimiter": b"delimiter_blob"},
+ "ordered_code_bytes": {},
+ },
+ },
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["table"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["table"][field])):
+ del request_init["table"][field][i][subfield]
+ else:
+ del request_init["table"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.update_table(request)
+
+ # Establish that the response is the type that we expect.
+ json_return_value = json_format.MessageToJson(return_value)
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_table_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BaseBigtableTableAdminClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_update_table"
+ ) as post, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_update_table_with_metadata"
+ ) as post_with_metadata, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "pre_update_table"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ post_with_metadata.assert_not_called()
+ pb_message = bigtable_table_admin.UpdateTableRequest.pb(
+ bigtable_table_admin.UpdateTableRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
+
+ request = bigtable_table_admin.UpdateTableRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+ post_with_metadata.return_value = operations_pb2.Operation(), metadata
+
+ client.update_table(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+ post_with_metadata.assert_called_once()
+
+
+def test_delete_table_rest_bad_request(
+ request_type=bigtable_table_admin.DeleteTableRequest,
+):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ json_return_value = ""
+ response_value.json = mock.Mock(return_value={})
+ response_value.status_code = 400
+ response_value.request = mock.Mock()
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ client.delete_table(request)
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.DeleteTableRequest,
+ dict,
+ ],
+)
+def test_delete_table_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None
+
+ # Wrap the value into a proper Response obj
+ response_value = mock.Mock()
+ response_value.status_code = 200
+ json_return_value = ""
+ response_value.content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ response = client.delete_table(request)
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_table_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BaseBigtableTableAdminClient(transport=transport)
+
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "pre_delete_table"
+ ) as pre:
+ pre.assert_not_called()
+ pb_message = bigtable_table_admin.DeleteTableRequest.pb(
+ bigtable_table_admin.DeleteTableRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = mock.Mock()
+ req.return_value.status_code = 200
+ req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+ request = bigtable_table_admin.DeleteTableRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ client.delete_table(
+ request,
+ metadata=[
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ],
+ )

-def test_transport_kind_rest():
-    transport = BigtableTableAdminClient.get_transport_class("rest")(
-        credentials=ga_credentials.AnonymousCredentials()
-    )
-    assert transport.kind == "rest"
+    pre.assert_called_once()

-def test_create_table_rest_bad_request(
- request_type=bigtable_table_admin.CreateTableRequest,
+def test_undelete_table_rest_bad_request(
+ request_type=bigtable_table_admin.UndeleteTableRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2"}
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -19510,79 +23839,72 @@ def test_create_table_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.create_table(request)
+ client.undelete_table(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.CreateTableRequest,
+ bigtable_table_admin.UndeleteTableRequest,
dict,
],
)
-def test_create_table_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_undelete_table_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2"}
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = gba_table.Table(
- name="name_value",
- granularity=gba_table.Table.TimestampGranularity.MILLIS,
- deletion_protection=True,
- )
+ return_value = operations_pb2.Operation(name="operations/spam")
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = gba_table.Table.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.create_table(request)
+ response = client.undelete_table(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, gba_table.Table)
- assert response.name == "name_value"
- assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS
- assert response.deletion_protection is True
+ json_return_value = json_format.MessageToJson(return_value)
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_create_table_rest_interceptors(null_interceptor):
+def test_undelete_table_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_create_table"
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_undelete_table"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_create_table_with_metadata"
+ transports.BigtableTableAdminRestInterceptor,
+ "post_undelete_table_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_create_table"
+ transports.BigtableTableAdminRestInterceptor, "pre_undelete_table"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.CreateTableRequest.pb(
- bigtable_table_admin.CreateTableRequest()
+ pb_message = bigtable_table_admin.UndeleteTableRequest.pb(
+ bigtable_table_admin.UndeleteTableRequest()
)
transcode.return_value = {
"method": "post",
@@ -19594,19 +23916,19 @@ def test_create_table_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = gba_table.Table.to_json(gba_table.Table())
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
req.return_value.content = return_value
- request = bigtable_table_admin.CreateTableRequest()
+ request = bigtable_table_admin.UndeleteTableRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = gba_table.Table()
- post_with_metadata.return_value = gba_table.Table(), metadata
+ post.return_value = operations_pb2.Operation()
+ post_with_metadata.return_value = operations_pb2.Operation(), metadata

-    client.create_table(
+ client.undelete_table(
request,
metadata=[
("key", "val"),
@@ -19619,14 +23941,14 @@ def test_create_table_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_create_table_from_snapshot_rest_bad_request(
- request_type=bigtable_table_admin.CreateTableFromSnapshotRequest,
+def test_create_authorized_view_rest_bad_request(
+ request_type=bigtable_table_admin.CreateAuthorizedViewRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2"}
+ request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -19641,23 +23963,101 @@ def test_create_table_from_snapshot_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.create_table_from_snapshot(request)
+ client.create_authorized_view(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.CreateTableFromSnapshotRequest,
+ bigtable_table_admin.CreateAuthorizedViewRequest,
dict,
],
)
-def test_create_table_from_snapshot_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_create_authorized_view_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2"}
+ request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init["authorized_view"] = {
+ "name": "name_value",
+ "subset_view": {
+ "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"],
+ "family_subsets": {},
+ },
+ "etag": "etag_value",
+ "deletion_protection": True,
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_table_admin.CreateAuthorizedViewRequest.meta.fields[
+ "authorized_view"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["authorized_view"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["authorized_view"][field])):
+ del request_init["authorized_view"][field][i][subfield]
+ else:
+ del request_init["authorized_view"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
@@ -19672,21 +24072,21 @@ def test_create_table_from_snapshot_rest_call_success(request_type):
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.create_table_from_snapshot(request)
+ response = client.create_authorized_view(request)
# Establish that the response is the type that we expect.
json_return_value = json_format.MessageToJson(return_value)
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_create_table_from_snapshot_rest_interceptors(null_interceptor):
+def test_create_authorized_view_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
@@ -19695,18 +24095,18 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor):
) as transcode, mock.patch.object(
operation.Operation, "_set_result_from_operation"
), mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot"
+ transports.BigtableTableAdminRestInterceptor, "post_create_authorized_view"
) as post, mock.patch.object(
transports.BigtableTableAdminRestInterceptor,
- "post_create_table_from_snapshot_with_metadata",
+ "post_create_authorized_view_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot"
+ transports.BigtableTableAdminRestInterceptor, "pre_create_authorized_view"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(
- bigtable_table_admin.CreateTableFromSnapshotRequest()
+ pb_message = bigtable_table_admin.CreateAuthorizedViewRequest.pb(
+ bigtable_table_admin.CreateAuthorizedViewRequest()
)
transcode.return_value = {
"method": "post",
@@ -19721,7 +24121,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor):
return_value = json_format.MessageToJson(operations_pb2.Operation())
req.return_value.content = return_value
- request = bigtable_table_admin.CreateTableFromSnapshotRequest()
+ request = bigtable_table_admin.CreateAuthorizedViewRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
@@ -19730,7 +24130,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor):
post.return_value = operations_pb2.Operation()
post_with_metadata.return_value = operations_pb2.Operation(), metadata
- client.create_table_from_snapshot(
+ client.create_authorized_view(
request,
metadata=[
("key", "val"),
@@ -19743,14 +24143,14 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_list_tables_rest_bad_request(
- request_type=bigtable_table_admin.ListTablesRequest,
+def test_list_authorized_views_rest_bad_request(
+ request_type=bigtable_table_admin.ListAuthorizedViewsRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2"}
+ request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -19765,29 +24165,29 @@ def test_list_tables_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.list_tables(request)
+ client.list_authorized_views(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.ListTablesRequest,
+ bigtable_table_admin.ListAuthorizedViewsRequest,
dict,
],
)
-def test_list_tables_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_list_authorized_views_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2"}
+ request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListTablesResponse(
+ return_value = bigtable_table_admin.ListAuthorizedViewsResponse(
next_page_token="next_page_token_value",
)
@@ -19796,44 +24196,45 @@ def test_list_tables_rest_call_success(request_type):
response_value.status_code = 200
# Convert return value to protobuf type
- return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
+ return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.list_tables(request)
+ response = client.list_authorized_views(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListTablesPager)
+ assert isinstance(response, pagers.ListAuthorizedViewsPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_list_tables_rest_interceptors(null_interceptor):
+def test_list_authorized_views_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_list_tables"
+ transports.BigtableTableAdminRestInterceptor, "post_list_authorized_views"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_list_tables_with_metadata"
+ transports.BigtableTableAdminRestInterceptor,
+ "post_list_authorized_views_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_list_tables"
+ transports.BigtableTableAdminRestInterceptor, "pre_list_authorized_views"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.ListTablesRequest.pb(
- bigtable_table_admin.ListTablesRequest()
+ pb_message = bigtable_table_admin.ListAuthorizedViewsRequest.pb(
+ bigtable_table_admin.ListAuthorizedViewsRequest()
)
transcode.return_value = {
"method": "post",
@@ -19845,24 +24246,24 @@ def test_list_tables_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = bigtable_table_admin.ListTablesResponse.to_json(
- bigtable_table_admin.ListTablesResponse()
+ return_value = bigtable_table_admin.ListAuthorizedViewsResponse.to_json(
+ bigtable_table_admin.ListAuthorizedViewsResponse()
)
req.return_value.content = return_value
- request = bigtable_table_admin.ListTablesRequest()
+ request = bigtable_table_admin.ListAuthorizedViewsRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = bigtable_table_admin.ListTablesResponse()
+ post.return_value = bigtable_table_admin.ListAuthorizedViewsResponse()
post_with_metadata.return_value = (
- bigtable_table_admin.ListTablesResponse(),
+ bigtable_table_admin.ListAuthorizedViewsResponse(),
metadata,
)
- client.list_tables(
+ client.list_authorized_views(
request,
metadata=[
("key", "val"),
@@ -19875,12 +24276,16 @@ def test_list_tables_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRequest):
- client = BigtableTableAdminClient(
+def test_get_authorized_view_rest_bad_request(
+ request_type=bigtable_table_admin.GetAuthorizedViewRequest,
+):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -19895,31 +24300,33 @@ def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRe
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.get_table(request)
+ client.get_authorized_view(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.GetTableRequest,
+ bigtable_table_admin.GetAuthorizedViewRequest,
dict,
],
)
-def test_get_table_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_get_authorized_view_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = table.Table(
+ return_value = table.AuthorizedView(
name="name_value",
- granularity=table.Table.TimestampGranularity.MILLIS,
+ etag="etag_value",
deletion_protection=True,
)
@@ -19928,46 +24335,47 @@ def test_get_table_rest_call_success(request_type):
response_value.status_code = 200
# Convert return value to protobuf type
- return_value = table.Table.pb(return_value)
+ return_value = table.AuthorizedView.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.get_table(request)
+ response = client.get_authorized_view(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, table.Table)
+ assert isinstance(response, table.AuthorizedView)
assert response.name == "name_value"
- assert response.granularity == table.Table.TimestampGranularity.MILLIS
+ assert response.etag == "etag_value"
assert response.deletion_protection is True
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_get_table_rest_interceptors(null_interceptor):
+def test_get_authorized_view_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_get_table"
+ transports.BigtableTableAdminRestInterceptor, "post_get_authorized_view"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_get_table_with_metadata"
+ transports.BigtableTableAdminRestInterceptor,
+ "post_get_authorized_view_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_get_table"
+ transports.BigtableTableAdminRestInterceptor, "pre_get_authorized_view"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.GetTableRequest.pb(
- bigtable_table_admin.GetTableRequest()
+ pb_message = bigtable_table_admin.GetAuthorizedViewRequest.pb(
+ bigtable_table_admin.GetAuthorizedViewRequest()
)
transcode.return_value = {
"method": "post",
@@ -19979,19 +24387,19 @@ def test_get_table_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = table.Table.to_json(table.Table())
+ return_value = table.AuthorizedView.to_json(table.AuthorizedView())
req.return_value.content = return_value
- request = bigtable_table_admin.GetTableRequest()
+ request = bigtable_table_admin.GetAuthorizedViewRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = table.Table()
- post_with_metadata.return_value = table.Table(), metadata
+ post.return_value = table.AuthorizedView()
+ post_with_metadata.return_value = table.AuthorizedView(), metadata

-    client.get_table(
+ client.get_authorized_view(
request,
metadata=[
("key", "val"),
@@ -20004,15 +24412,17 @@ def test_get_table_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_update_table_rest_bad_request(
- request_type=bigtable_table_admin.UpdateTableRequest,
+def test_update_authorized_view_rest_bad_request(
+ request_type=bigtable_table_admin.UpdateAuthorizedViewRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
request_init = {
- "table": {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ "authorized_view": {
+ "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ }
}
request = request_type(**request_init)
@@ -20028,88 +24438,44 @@ def test_update_table_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.update_table(request)
+ client.update_authorized_view(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.UpdateTableRequest,
+ bigtable_table_admin.UpdateAuthorizedViewRequest,
dict,
],
)
-def test_update_table_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_update_authorized_view_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
request_init = {
- "table": {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ "authorized_view": {
+ "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ }
}
- request_init["table"] = {
- "name": "projects/sample1/instances/sample2/tables/sample3",
- "cluster_states": {},
- "column_families": {},
- "granularity": 1,
- "restore_info": {
- "source_type": 1,
- "backup_info": {
- "backup": "backup_value",
- "start_time": {"seconds": 751, "nanos": 543},
- "end_time": {},
- "source_table": "source_table_value",
- "source_backup": "source_backup_value",
- },
+ request_init["authorized_view"] = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
+ "subset_view": {
+ "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"],
+ "family_subsets": {},
},
- "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}},
+ "etag": "etag_value",
"deletion_protection": True,
- "automated_backup_policy": {"retention_period": {}, "frequency": {}},
- "row_key_schema": {
- "fields": [
- {
- "field_name": "field_name_value",
- "type_": {
- "bytes_type": {"encoding": {"raw": {}}},
- "string_type": {"encoding": {"utf8_raw": {}, "utf8_bytes": {}}},
- "int64_type": {
- "encoding": {
- "big_endian_bytes": {"bytes_type": {}},
- "ordered_code_bytes": {},
- }
- },
- "float32_type": {},
- "float64_type": {},
- "bool_type": {},
- "timestamp_type": {"encoding": {"unix_micros_int64": {}}},
- "date_type": {},
- "aggregate_type": {
- "input_type": {},
- "state_type": {},
- "sum": {},
- "hllpp_unique_count": {},
- "max_": {},
- "min_": {},
- },
- "struct_type": {},
- "array_type": {"element_type": {}},
- "map_type": {"key_type": {}, "value_type": {}},
- },
- }
- ],
- "encoding": {
- "singleton": {},
- "delimited_bytes": {"delimiter": b"delimiter_blob"},
- "ordered_code_bytes": {},
- },
- },
}
# The version of a generated dependency at test runtime may differ from the version used during generation.
# Delete any fields which are not present in the current runtime dependency
# See https://github.com/googleapis/gapic-generator-python/issues/1748
# Determine if the message type is proto-plus or protobuf
- test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"]
+ test_field = bigtable_table_admin.UpdateAuthorizedViewRequest.meta.fields[
+ "authorized_view"
+ ]
def get_message_fields(field):
# Given a field which is a message (composite type), return a list with
@@ -20137,7 +24503,7 @@ def get_message_fields(field):
# For each item in the sample request, create a list of sub fields which are not present at runtime
# Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
- for field, value in request_init["table"].items(): # pragma: NO COVER
+ for field, value in request_init["authorized_view"].items(): # pragma: NO COVER
result = None
is_repeated = False
# For repeated fields
@@ -20167,10 +24533,10 @@ def get_message_fields(field):
subfield = subfield_to_delete.get("subfield")
if subfield:
if field_repeated:
- for i in range(0, len(request_init["table"][field])):
- del request_init["table"][field][i][subfield]
+ for i in range(0, len(request_init["authorized_view"][field])):
+ del request_init["authorized_view"][field][i][subfield]
else:
- del request_init["table"][field][subfield]
+ del request_init["authorized_view"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
@@ -20185,21 +24551,21 @@ def get_message_fields(field):
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.update_table(request)
+ response = client.update_authorized_view(request)
# Establish that the response is the type that we expect.
json_return_value = json_format.MessageToJson(return_value)
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_update_table_rest_interceptors(null_interceptor):
+def test_update_authorized_view_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
@@ -20208,17 +24574,18 @@ def test_update_table_rest_interceptors(null_interceptor):
) as transcode, mock.patch.object(
operation.Operation, "_set_result_from_operation"
), mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_update_table"
+ transports.BigtableTableAdminRestInterceptor, "post_update_authorized_view"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_update_table_with_metadata"
+ transports.BigtableTableAdminRestInterceptor,
+ "post_update_authorized_view_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_update_table"
+ transports.BigtableTableAdminRestInterceptor, "pre_update_authorized_view"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.UpdateTableRequest.pb(
- bigtable_table_admin.UpdateTableRequest()
+ pb_message = bigtable_table_admin.UpdateAuthorizedViewRequest.pb(
+ bigtable_table_admin.UpdateAuthorizedViewRequest()
)
transcode.return_value = {
"method": "post",
@@ -20233,7 +24600,7 @@ def test_update_table_rest_interceptors(null_interceptor):
return_value = json_format.MessageToJson(operations_pb2.Operation())
req.return_value.content = return_value
- request = bigtable_table_admin.UpdateTableRequest()
+ request = bigtable_table_admin.UpdateAuthorizedViewRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
@@ -20242,7 +24609,7 @@ def test_update_table_rest_interceptors(null_interceptor):
post.return_value = operations_pb2.Operation()
post_with_metadata.return_value = operations_pb2.Operation(), metadata
- client.update_table(
+ client.update_authorized_view(
request,
metadata=[
("key", "val"),
@@ -20255,123 +24622,16 @@ def test_update_table_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_delete_table_rest_bad_request(
- request_type=bigtable_table_admin.DeleteTableRequest,
-):
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(), transport="rest"
- )
- # send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
- request = request_type(**request_init)
-
- # Mock the http request call within the method and fake a BadRequest error.
- with mock.patch.object(Session, "request") as req, pytest.raises(
- core_exceptions.BadRequest
- ):
- # Wrap the value into a proper Response obj
- response_value = mock.Mock()
- json_return_value = ""
- response_value.json = mock.Mock(return_value={})
- response_value.status_code = 400
- response_value.request = mock.Mock()
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.delete_table(request)
-
-
-@pytest.mark.parametrize(
- "request_type",
- [
- bigtable_table_admin.DeleteTableRequest,
- dict,
- ],
-)
-def test_delete_table_rest_call_success(request_type):
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(), transport="rest"
- )
-
- # send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
- request = request_type(**request_init)
-
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(type(client.transport._session), "request") as req:
- # Designate an appropriate value for the returned response.
- return_value = None
-
- # Wrap the value into a proper Response obj
- response_value = mock.Mock()
- response_value.status_code = 200
- json_return_value = ""
- response_value.content = json_return_value.encode("UTF-8")
- req.return_value = response_value
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.delete_table(request)
-
- # Establish that the response is the type that we expect.
- assert response is None
-
-
-@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_delete_table_rest_interceptors(null_interceptor):
- transport = transports.BigtableTableAdminRestTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- interceptor=None
- if null_interceptor
- else transports.BigtableTableAdminRestInterceptor(),
- )
- client = BigtableTableAdminClient(transport=transport)
-
- with mock.patch.object(
- type(client.transport._session), "request"
- ) as req, mock.patch.object(
- path_template, "transcode"
- ) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_delete_table"
- ) as pre:
- pre.assert_not_called()
- pb_message = bigtable_table_admin.DeleteTableRequest.pb(
- bigtable_table_admin.DeleteTableRequest()
- )
- transcode.return_value = {
- "method": "post",
- "uri": "my_uri",
- "body": pb_message,
- "query_params": pb_message,
- }
-
- req.return_value = mock.Mock()
- req.return_value.status_code = 200
- req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
-
- request = bigtable_table_admin.DeleteTableRequest()
- metadata = [
- ("key", "val"),
- ("cephalopod", "squid"),
- ]
- pre.return_value = request, metadata
-
- client.delete_table(
- request,
- metadata=[
- ("key", "val"),
- ("cephalopod", "squid"),
- ],
- )
-
- pre.assert_called_once()
-
-
-def test_undelete_table_rest_bad_request(
- request_type=bigtable_table_admin.UndeleteTableRequest,
+def test_delete_authorized_view_rest_bad_request(
+ request_type=bigtable_table_admin.DeleteAuthorizedViewRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -20386,72 +24646,65 @@ def test_undelete_table_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.undelete_table(request)
+ client.delete_authorized_view(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.UndeleteTableRequest,
+ bigtable_table_admin.DeleteAuthorizedViewRequest,
dict,
],
)
-def test_undelete_table_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_delete_authorized_view_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = None
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
- json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = ""
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.undelete_table(request)
+ response = client.delete_authorized_view(request)
# Establish that the response is the type that we expect.
- json_return_value = json_format.MessageToJson(return_value)
+ assert response is None
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_undelete_table_rest_interceptors(null_interceptor):
+def test_delete_authorized_view_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- operation.Operation, "_set_result_from_operation"
- ), mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_undelete_table"
- ) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor,
- "post_undelete_table_with_metadata",
- ) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_undelete_table"
+ transports.BigtableTableAdminRestInterceptor, "pre_delete_authorized_view"
) as pre:
pre.assert_not_called()
- post.assert_not_called()
- post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.UndeleteTableRequest.pb(
- bigtable_table_admin.UndeleteTableRequest()
+ pb_message = bigtable_table_admin.DeleteAuthorizedViewRequest.pb(
+ bigtable_table_admin.DeleteAuthorizedViewRequest()
)
transcode.return_value = {
"method": "post",
@@ -20463,19 +24716,15 @@ def test_undelete_table_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = json_format.MessageToJson(operations_pb2.Operation())
- req.return_value.content = return_value
- request = bigtable_table_admin.UndeleteTableRequest()
+ request = bigtable_table_admin.DeleteAuthorizedViewRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = operations_pb2.Operation()
- post_with_metadata.return_value = operations_pb2.Operation(), metadata
- client.undelete_table(
+ client.delete_authorized_view(
request,
metadata=[
("key", "val"),
@@ -20484,18 +24733,16 @@ def test_undelete_table_rest_interceptors(null_interceptor):
)
pre.assert_called_once()
- post.assert_called_once()
- post_with_metadata.assert_called_once()
-def test_create_authorized_view_rest_bad_request(
- request_type=bigtable_table_admin.CreateAuthorizedViewRequest,
+def test_modify_column_families_rest_bad_request(
+ request_type=bigtable_table_admin.ModifyColumnFamiliesRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -20510,150 +24757,80 @@ def test_create_authorized_view_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.create_authorized_view(request)
+ client.modify_column_families(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.CreateAuthorizedViewRequest,
+ bigtable_table_admin.ModifyColumnFamiliesRequest,
dict,
],
)
-def test_create_authorized_view_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_modify_column_families_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
- request_init["authorized_view"] = {
- "name": "name_value",
- "subset_view": {
- "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"],
- "family_subsets": {},
- },
- "etag": "etag_value",
- "deletion_protection": True,
- }
- # The version of a generated dependency at test runtime may differ from the version used during generation.
- # Delete any fields which are not present in the current runtime dependency
- # See https://github.com/googleapis/gapic-generator-python/issues/1748
-
- # Determine if the message type is proto-plus or protobuf
- test_field = bigtable_table_admin.CreateAuthorizedViewRequest.meta.fields[
- "authorized_view"
- ]
-
- def get_message_fields(field):
- # Given a field which is a message (composite type), return a list with
- # all the fields of the message.
- # If the field is not a composite type, return an empty list.
- message_fields = []
-
- if hasattr(field, "message") and field.message:
- is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
-
- if is_field_type_proto_plus_type:
- message_fields = field.message.meta.fields.values()
- # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
- else: # pragma: NO COVER
- message_fields = field.message.DESCRIPTOR.fields
- return message_fields
-
- runtime_nested_fields = [
- (field.name, nested_field.name)
- for field in get_message_fields(test_field)
- for nested_field in get_message_fields(field)
- ]
-
- subfields_not_in_runtime = []
-
- # For each item in the sample request, create a list of sub fields which are not present at runtime
- # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
- for field, value in request_init["authorized_view"].items(): # pragma: NO COVER
- result = None
- is_repeated = False
- # For repeated fields
- if isinstance(value, list) and len(value):
- is_repeated = True
- result = value[0]
- # For fields where the type is another message
- if isinstance(value, dict):
- result = value
-
- if result and hasattr(result, "keys"):
- for subfield in result.keys():
- if (field, subfield) not in runtime_nested_fields:
- subfields_not_in_runtime.append(
- {
- "field": field,
- "subfield": subfield,
- "is_repeated": is_repeated,
- }
- )
-
- # Remove fields from the sample request which are not present in the runtime version of the dependency
- # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
- for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
- field = subfield_to_delete.get("field")
- field_repeated = subfield_to_delete.get("is_repeated")
- subfield = subfield_to_delete.get("subfield")
- if subfield:
- if field_repeated:
- for i in range(0, len(request_init["authorized_view"][field])):
- del request_init["authorized_view"][field][i][subfield]
- else:
- del request_init["authorized_view"][field][subfield]
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = table.Table(
+ name="name_value",
+ granularity=table.Table.TimestampGranularity.MILLIS,
+ deletion_protection=True,
+ )
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.create_authorized_view(request)
+ response = client.modify_column_families(request)
# Establish that the response is the type that we expect.
- json_return_value = json_format.MessageToJson(return_value)
+ assert isinstance(response, table.Table)
+ assert response.name == "name_value"
+ assert response.granularity == table.Table.TimestampGranularity.MILLIS
+ assert response.deletion_protection is True
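The *_rest_call_success tests fake the HTTP body by serializing a protobuf message to JSON, which is what the REST transport would actually receive; proto-plus messages such as table.Table are first converted with table.Table.pb(...). A minimal round trip of that idea with a well-known protobuf type, assuming only google.protobuf is installed:

from google.protobuf import duration_pb2, json_format

# Serialize to the JSON the mocked response carries (Duration uses the
# well-known "<seconds>.<nanos>s" JSON form).
msg = duration_pb2.Duration(seconds=751, nanos=543)
body = json_format.MessageToJson(msg)
content = body.encode("UTF-8")  # response_value.content in the tests above

# The transport-side parse is the inverse of MessageToJson.
parsed = json_format.Parse(body, duration_pb2.Duration())
assert parsed.seconds == 751 and parsed.nanos == 543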
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_create_authorized_view_rest_interceptors(null_interceptor):
+def test_modify_column_families_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- operation.Operation, "_set_result_from_operation"
- ), mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_create_authorized_view"
+ transports.BigtableTableAdminRestInterceptor, "post_modify_column_families"
) as post, mock.patch.object(
transports.BigtableTableAdminRestInterceptor,
- "post_create_authorized_view_with_metadata",
+ "post_modify_column_families_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_create_authorized_view"
+ transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.CreateAuthorizedViewRequest.pb(
- bigtable_table_admin.CreateAuthorizedViewRequest()
+ pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(
+ bigtable_table_admin.ModifyColumnFamiliesRequest()
)
transcode.return_value = {
"method": "post",
@@ -20665,19 +24842,19 @@ def test_create_authorized_view_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = json_format.MessageToJson(operations_pb2.Operation())
+ return_value = table.Table.to_json(table.Table())
req.return_value.content = return_value
- request = bigtable_table_admin.CreateAuthorizedViewRequest()
+ request = bigtable_table_admin.ModifyColumnFamiliesRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = operations_pb2.Operation()
- post_with_metadata.return_value = operations_pb2.Operation(), metadata
+ post.return_value = table.Table()
+ post_with_metadata.return_value = table.Table(), metadata
- client.create_authorized_view(
+ client.modify_column_families(
request,
metadata=[
("key", "val"),
@@ -20690,14 +24867,14 @@ def test_create_authorized_view_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_list_authorized_views_rest_bad_request(
- request_type=bigtable_table_admin.ListAuthorizedViewsRequest,
+def test_drop_row_range_rest_bad_request(
+ request_type=bigtable_table_admin.DropRowRangeRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -20712,76 +24889,63 @@ def test_list_authorized_views_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.list_authorized_views(request)
+ client.drop_row_range(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.ListAuthorizedViewsRequest,
+ bigtable_table_admin.DropRowRangeRequest,
dict,
],
)
-def test_list_authorized_views_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_drop_row_range_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListAuthorizedViewsResponse(
- next_page_token="next_page_token_value",
- )
+ return_value = None
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = ""
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.list_authorized_views(request)
+ response = client.drop_row_range(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListAuthorizedViewsPager)
- assert response.next_page_token == "next_page_token_value"
+ assert response is None
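For RPCs mapped to google.protobuf.Empty (drop_row_range and the delete_* methods) there is no payload to fake: the test returns a 200 response with an empty body and only checks that the client call returned None. The mock below mirrors the response object those tests hand to the patched session; it is illustrative only.

from unittest import mock

response_value = mock.Mock()
response_value.status_code = 200
response_value.content = "".encode("UTF-8")  # empty body for Empty-returning RPCs
response_value.headers = {"header-1": "value-1", "header-2": "value-2"}

assert response_value.status_code == 200 and response_value.content == b""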
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_list_authorized_views_rest_interceptors(null_interceptor):
+def test_drop_row_range_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_list_authorized_views"
- ) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor,
- "post_list_authorized_views_with_metadata",
- ) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_list_authorized_views"
+ transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range"
) as pre:
pre.assert_not_called()
- post.assert_not_called()
- post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.ListAuthorizedViewsRequest.pb(
- bigtable_table_admin.ListAuthorizedViewsRequest()
+ pb_message = bigtable_table_admin.DropRowRangeRequest.pb(
+ bigtable_table_admin.DropRowRangeRequest()
)
transcode.return_value = {
"method": "post",
@@ -20793,24 +24957,15 @@ def test_list_authorized_views_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = bigtable_table_admin.ListAuthorizedViewsResponse.to_json(
- bigtable_table_admin.ListAuthorizedViewsResponse()
- )
- req.return_value.content = return_value
- request = bigtable_table_admin.ListAuthorizedViewsRequest()
+ request = bigtable_table_admin.DropRowRangeRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = bigtable_table_admin.ListAuthorizedViewsResponse()
- post_with_metadata.return_value = (
- bigtable_table_admin.ListAuthorizedViewsResponse(),
- metadata,
- )
- client.list_authorized_views(
+ client.drop_row_range(
request,
metadata=[
("key", "val"),
@@ -20819,20 +24974,16 @@ def test_list_authorized_views_rest_interceptors(null_interceptor):
)
pre.assert_called_once()
- post.assert_called_once()
- post_with_metadata.assert_called_once()
-def test_get_authorized_view_rest_bad_request(
- request_type=bigtable_table_admin.GetAuthorizedViewRequest,
+def test_generate_consistency_token_rest_bad_request(
+ request_type=bigtable_table_admin.GenerateConsistencyTokenRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
- }
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -20847,34 +24998,30 @@ def test_get_authorized_view_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.get_authorized_view(request)
+ client.generate_consistency_token(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.GetAuthorizedViewRequest,
+ bigtable_table_admin.GenerateConsistencyTokenRequest,
dict,
],
)
-def test_get_authorized_view_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_generate_consistency_token_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
- }
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = table.AuthorizedView(
- name="name_value",
- etag="etag_value",
- deletion_protection=True,
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse(
+ consistency_token="consistency_token_value",
)
# Wrap the value into a proper Response obj
@@ -20882,47 +25029,47 @@ def test_get_authorized_view_rest_call_success(request_type):
response_value.status_code = 200
# Convert return value to protobuf type
- return_value = table.AuthorizedView.pb(return_value)
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
+ return_value
+ )
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.get_authorized_view(request)
+ response = client.generate_consistency_token(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, table.AuthorizedView)
- assert response.name == "name_value"
- assert response.etag == "etag_value"
- assert response.deletion_protection is True
+ assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse)
+ assert response.consistency_token == "consistency_token_value"
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_get_authorized_view_rest_interceptors(null_interceptor):
+def test_generate_consistency_token_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_get_authorized_view"
+ transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token"
) as post, mock.patch.object(
transports.BigtableTableAdminRestInterceptor,
- "post_get_authorized_view_with_metadata",
+ "post_generate_consistency_token_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_get_authorized_view"
+ transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.GetAuthorizedViewRequest.pb(
- bigtable_table_admin.GetAuthorizedViewRequest()
+ pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb(
+ bigtable_table_admin.GenerateConsistencyTokenRequest()
)
transcode.return_value = {
"method": "post",
@@ -20934,19 +25081,24 @@ def test_get_authorized_view_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = table.AuthorizedView.to_json(table.AuthorizedView())
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.to_json(
+ bigtable_table_admin.GenerateConsistencyTokenResponse()
+ )
req.return_value.content = return_value
- request = bigtable_table_admin.GetAuthorizedViewRequest()
+ request = bigtable_table_admin.GenerateConsistencyTokenRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = table.AuthorizedView()
- post_with_metadata.return_value = table.AuthorizedView(), metadata
+ post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
+ post_with_metadata.return_value = (
+ bigtable_table_admin.GenerateConsistencyTokenResponse(),
+ metadata,
+ )
- client.get_authorized_view(
+ client.generate_consistency_token(
request,
metadata=[
("key", "val"),
@@ -20959,18 +25111,14 @@ def test_get_authorized_view_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_update_authorized_view_rest_bad_request(
- request_type=bigtable_table_admin.UpdateAuthorizedViewRequest,
+def test_check_consistency_rest_bad_request(
+ request_type=bigtable_table_admin.CheckConsistencyRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "authorized_view": {
- "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
- }
- }
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -20985,154 +25133,76 @@ def test_update_authorized_view_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.update_authorized_view(request)
+ client.check_consistency(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.UpdateAuthorizedViewRequest,
+ bigtable_table_admin.CheckConsistencyRequest,
dict,
],
)
-def test_update_authorized_view_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_check_consistency_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "authorized_view": {
- "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
- }
- }
- request_init["authorized_view"] = {
- "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
- "subset_view": {
- "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"],
- "family_subsets": {},
- },
- "etag": "etag_value",
- "deletion_protection": True,
- }
- # The version of a generated dependency at test runtime may differ from the version used during generation.
- # Delete any fields which are not present in the current runtime dependency
- # See https://github.com/googleapis/gapic-generator-python/issues/1748
-
- # Determine if the message type is proto-plus or protobuf
- test_field = bigtable_table_admin.UpdateAuthorizedViewRequest.meta.fields[
- "authorized_view"
- ]
-
- def get_message_fields(field):
- # Given a field which is a message (composite type), return a list with
- # all the fields of the message.
- # If the field is not a composite type, return an empty list.
- message_fields = []
-
- if hasattr(field, "message") and field.message:
- is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
-
- if is_field_type_proto_plus_type:
- message_fields = field.message.meta.fields.values()
- # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
- else: # pragma: NO COVER
- message_fields = field.message.DESCRIPTOR.fields
- return message_fields
-
- runtime_nested_fields = [
- (field.name, nested_field.name)
- for field in get_message_fields(test_field)
- for nested_field in get_message_fields(field)
- ]
-
- subfields_not_in_runtime = []
-
- # For each item in the sample request, create a list of sub fields which are not present at runtime
- # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
- for field, value in request_init["authorized_view"].items(): # pragma: NO COVER
- result = None
- is_repeated = False
- # For repeated fields
- if isinstance(value, list) and len(value):
- is_repeated = True
- result = value[0]
- # For fields where the type is another message
- if isinstance(value, dict):
- result = value
-
- if result and hasattr(result, "keys"):
- for subfield in result.keys():
- if (field, subfield) not in runtime_nested_fields:
- subfields_not_in_runtime.append(
- {
- "field": field,
- "subfield": subfield,
- "is_repeated": is_repeated,
- }
- )
-
- # Remove fields from the sample request which are not present in the runtime version of the dependency
- # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
- for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
- field = subfield_to_delete.get("field")
- field_repeated = subfield_to_delete.get("is_repeated")
- subfield = subfield_to_delete.get("subfield")
- if subfield:
- if field_repeated:
- for i in range(0, len(request_init["authorized_view"][field])):
- del request_init["authorized_view"][field][i][subfield]
- else:
- del request_init["authorized_view"][field][subfield]
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = bigtable_table_admin.CheckConsistencyResponse(
+ consistent=True,
+ )
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.update_authorized_view(request)
+ response = client.check_consistency(request)
# Establish that the response is the type that we expect.
- json_return_value = json_format.MessageToJson(return_value)
+ assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse)
+ assert response.consistent is True
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_update_authorized_view_rest_interceptors(null_interceptor):
+def test_check_consistency_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- operation.Operation, "_set_result_from_operation"
- ), mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_update_authorized_view"
+ transports.BigtableTableAdminRestInterceptor, "post_check_consistency"
) as post, mock.patch.object(
transports.BigtableTableAdminRestInterceptor,
- "post_update_authorized_view_with_metadata",
+ "post_check_consistency_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_update_authorized_view"
+ transports.BigtableTableAdminRestInterceptor, "pre_check_consistency"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.UpdateAuthorizedViewRequest.pb(
- bigtable_table_admin.UpdateAuthorizedViewRequest()
+ pb_message = bigtable_table_admin.CheckConsistencyRequest.pb(
+ bigtable_table_admin.CheckConsistencyRequest()
)
transcode.return_value = {
"method": "post",
@@ -21144,19 +25214,24 @@ def test_update_authorized_view_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = json_format.MessageToJson(operations_pb2.Operation())
+ return_value = bigtable_table_admin.CheckConsistencyResponse.to_json(
+ bigtable_table_admin.CheckConsistencyResponse()
+ )
req.return_value.content = return_value
- request = bigtable_table_admin.UpdateAuthorizedViewRequest()
+ request = bigtable_table_admin.CheckConsistencyRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = operations_pb2.Operation()
- post_with_metadata.return_value = operations_pb2.Operation(), metadata
+ post.return_value = bigtable_table_admin.CheckConsistencyResponse()
+ post_with_metadata.return_value = (
+ bigtable_table_admin.CheckConsistencyResponse(),
+ metadata,
+ )
- client.update_authorized_view(
+ client.check_consistency(
request,
metadata=[
("key", "val"),
@@ -21169,16 +25244,14 @@ def test_update_authorized_view_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_delete_authorized_view_rest_bad_request(
- request_type=bigtable_table_admin.DeleteAuthorizedViewRequest,
+def test_snapshot_table_rest_bad_request(
+ request_type=bigtable_table_admin.SnapshotTableRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
- }
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -21193,65 +25266,72 @@ def test_delete_authorized_view_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.delete_authorized_view(request)
+ client.snapshot_table(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.DeleteAuthorizedViewRequest,
+ bigtable_table_admin.SnapshotTableRequest,
dict,
],
)
-def test_delete_authorized_view_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_snapshot_table_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
- }
+ request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = None
+ return_value = operations_pb2.Operation(name="operations/spam")
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
- json_return_value = ""
+ json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.delete_authorized_view(request)
+ response = client.snapshot_table(request)
# Establish that the response is the type that we expect.
- assert response is None
+ json_return_value = json_format.MessageToJson(return_value)
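snapshot_table, create_backup, and the authorized-view mutations are long-running: the faked body is a google.longrunning Operation rendered as JSON, and the interceptor variants additionally patch operation.Operation._set_result_from_operation so the returned future never polls. A self-contained sketch of that body, assuming googleapis-common-protos is installed:

from google.longrunning import operations_pb2
from google.protobuf import json_format

# What the mocked REST response carries for an LRO-returning method.
return_value = operations_pb2.Operation(name="operations/spam")
body = json_format.MessageToJson(return_value)

# Round-trip to confirm the body parses back into the same Operation.
roundtrip = json_format.Parse(body, operations_pb2.Operation())
assert roundtrip.name == "operations/spam"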
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_delete_authorized_view_rest_interceptors(null_interceptor):
+def test_snapshot_table_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_delete_authorized_view"
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_snapshot_table"
+ ) as post, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor,
+ "post_snapshot_table_with_metadata",
+ ) as post_with_metadata, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table"
) as pre:
pre.assert_not_called()
- pb_message = bigtable_table_admin.DeleteAuthorizedViewRequest.pb(
- bigtable_table_admin.DeleteAuthorizedViewRequest()
+ post.assert_not_called()
+ post_with_metadata.assert_not_called()
+ pb_message = bigtable_table_admin.SnapshotTableRequest.pb(
+ bigtable_table_admin.SnapshotTableRequest()
)
transcode.return_value = {
"method": "post",
@@ -21263,15 +25343,19 @@ def test_delete_authorized_view_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
+ req.return_value.content = return_value
- request = bigtable_table_admin.DeleteAuthorizedViewRequest()
+ request = bigtable_table_admin.SnapshotTableRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+ post_with_metadata.return_value = operations_pb2.Operation(), metadata
- client.delete_authorized_view(
+ client.snapshot_table(
request,
metadata=[
("key", "val"),
@@ -21280,16 +25364,20 @@ def test_delete_authorized_view_rest_interceptors(null_interceptor):
)
pre.assert_called_once()
+ post.assert_called_once()
+ post_with_metadata.assert_called_once()
-def test_modify_column_families_rest_bad_request(
- request_type=bigtable_table_admin.ModifyColumnFamiliesRequest,
+def test_get_snapshot_rest_bad_request(
+ request_type=bigtable_table_admin.GetSnapshotRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -21304,32 +25392,35 @@ def test_modify_column_families_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.modify_column_families(request)
+ client.get_snapshot(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.ModifyColumnFamiliesRequest,
+ bigtable_table_admin.GetSnapshotRequest,
dict,
],
)
-def test_modify_column_families_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_get_snapshot_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = table.Table(
+ return_value = table.Snapshot(
name="name_value",
- granularity=table.Table.TimestampGranularity.MILLIS,
- deletion_protection=True,
+ data_size_bytes=1594,
+ state=table.Snapshot.State.READY,
+ description="description_value",
)
# Wrap the value into a proper Response obj
@@ -21337,47 +25428,47 @@ def test_modify_column_families_rest_call_success(request_type):
response_value.status_code = 200
# Convert return value to protobuf type
- return_value = table.Table.pb(return_value)
+ return_value = table.Snapshot.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.modify_column_families(request)
+ response = client.get_snapshot(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, table.Table)
+ assert isinstance(response, table.Snapshot)
assert response.name == "name_value"
- assert response.granularity == table.Table.TimestampGranularity.MILLIS
- assert response.deletion_protection is True
+ assert response.data_size_bytes == 1594
+ assert response.state == table.Snapshot.State.READY
+ assert response.description == "description_value"
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_modify_column_families_rest_interceptors(null_interceptor):
+def test_get_snapshot_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_modify_column_families"
+ transports.BigtableTableAdminRestInterceptor, "post_get_snapshot"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor,
- "post_modify_column_families_with_metadata",
+ transports.BigtableTableAdminRestInterceptor, "post_get_snapshot_with_metadata"
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families"
+ transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(
- bigtable_table_admin.ModifyColumnFamiliesRequest()
+ pb_message = bigtable_table_admin.GetSnapshotRequest.pb(
+ bigtable_table_admin.GetSnapshotRequest()
)
transcode.return_value = {
"method": "post",
@@ -21389,19 +25480,19 @@ def test_modify_column_families_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = table.Table.to_json(table.Table())
+ return_value = table.Snapshot.to_json(table.Snapshot())
req.return_value.content = return_value
- request = bigtable_table_admin.ModifyColumnFamiliesRequest()
+ request = bigtable_table_admin.GetSnapshotRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = table.Table()
- post_with_metadata.return_value = table.Table(), metadata
+ post.return_value = table.Snapshot()
+ post_with_metadata.return_value = table.Snapshot(), metadata
- client.modify_column_families(
+ client.get_snapshot(
request,
metadata=[
("key", "val"),
@@ -21414,14 +25505,14 @@ def test_modify_column_families_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_drop_row_range_rest_bad_request(
- request_type=bigtable_table_admin.DropRowRangeRequest,
+def test_list_snapshots_rest_bad_request(
+ request_type=bigtable_table_admin.ListSnapshotsRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -21436,63 +25527,76 @@ def test_drop_row_range_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.drop_row_range(request)
+ client.list_snapshots(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.DropRowRangeRequest,
+ bigtable_table_admin.ListSnapshotsRequest,
dict,
],
)
-def test_drop_row_range_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_list_snapshots_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = None
+ return_value = bigtable_table_admin.ListSnapshotsResponse(
+ next_page_token="next_page_token_value",
+ )
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
- json_return_value = ""
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.drop_row_range(request)
+ response = client.list_snapshots(request)
# Establish that the response is the type that we expect.
- assert response is None
+ assert isinstance(response, pagers.ListSnapshotsPager)
+ assert response.next_page_token == "next_page_token_value"
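list_snapshots (like the other list_* methods) wraps the response in a pager that keeps re-issuing the request with the previous response's next_page_token until the token comes back empty. The loop below sketches that contract against a hypothetical fetch_page callable; it is not the real pagers.ListSnapshotsPager implementation.

def iterate_all(fetch_page):
    token = ""
    while True:
        items, token = fetch_page(page_token=token)
        for item in items:
            yield item
        if not token:
            break


pages = {"": (["snap-1", "snap-2"], "t1"), "t1": (["snap-3"], "")}
assert list(iterate_all(lambda page_token: pages[page_token])) == [
    "snap-1",
    "snap-2",
    "snap-3",
]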
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_drop_row_range_rest_interceptors(null_interceptor):
+def test_list_snapshots_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range"
+ transports.BigtableTableAdminRestInterceptor, "post_list_snapshots"
+ ) as post, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor,
+ "post_list_snapshots_with_metadata",
+ ) as post_with_metadata, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots"
) as pre:
pre.assert_not_called()
- pb_message = bigtable_table_admin.DropRowRangeRequest.pb(
- bigtable_table_admin.DropRowRangeRequest()
+ post.assert_not_called()
+ post_with_metadata.assert_not_called()
+ pb_message = bigtable_table_admin.ListSnapshotsRequest.pb(
+ bigtable_table_admin.ListSnapshotsRequest()
)
transcode.return_value = {
"method": "post",
@@ -21504,15 +25608,24 @@ def test_drop_row_range_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = bigtable_table_admin.ListSnapshotsResponse.to_json(
+ bigtable_table_admin.ListSnapshotsResponse()
+ )
+ req.return_value.content = return_value
- request = bigtable_table_admin.DropRowRangeRequest()
+ request = bigtable_table_admin.ListSnapshotsRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
+ post.return_value = bigtable_table_admin.ListSnapshotsResponse()
+ post_with_metadata.return_value = (
+ bigtable_table_admin.ListSnapshotsResponse(),
+ metadata,
+ )
- client.drop_row_range(
+ client.list_snapshots(
request,
metadata=[
("key", "val"),
@@ -21521,16 +25634,20 @@ def test_drop_row_range_rest_interceptors(null_interceptor):
)
pre.assert_called_once()
+ post.assert_called_once()
+ post_with_metadata.assert_called_once()
-def test_generate_consistency_token_rest_bad_request(
- request_type=bigtable_table_admin.GenerateConsistencyTokenRequest,
+def test_delete_snapshot_rest_bad_request(
+ request_type=bigtable_table_admin.DeleteSnapshotRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -21545,78 +25662,65 @@ def test_generate_consistency_token_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.generate_consistency_token(request)
+ client.delete_snapshot(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.GenerateConsistencyTokenRequest,
+ bigtable_table_admin.DeleteSnapshotRequest,
dict,
],
)
-def test_generate_consistency_token_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_delete_snapshot_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.GenerateConsistencyTokenResponse(
- consistency_token="consistency_token_value",
- )
+ return_value = None
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
- return_value
- )
- json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = ""
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.generate_consistency_token(request)
+ response = client.delete_snapshot(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse)
- assert response.consistency_token == "consistency_token_value"
+ assert response is None
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_generate_consistency_token_rest_interceptors(null_interceptor):
+def test_delete_snapshot_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token"
- ) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor,
- "post_generate_consistency_token_with_metadata",
- ) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token"
+ transports.BigtableTableAdminRestInterceptor, "pre_delete_snapshot"
) as pre:
pre.assert_not_called()
- post.assert_not_called()
- post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb(
- bigtable_table_admin.GenerateConsistencyTokenRequest()
+ pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb(
+ bigtable_table_admin.DeleteSnapshotRequest()
)
transcode.return_value = {
"method": "post",
@@ -21628,24 +25732,15 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.to_json(
- bigtable_table_admin.GenerateConsistencyTokenResponse()
- )
- req.return_value.content = return_value
- request = bigtable_table_admin.GenerateConsistencyTokenRequest()
+ request = bigtable_table_admin.DeleteSnapshotRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
- post_with_metadata.return_value = (
- bigtable_table_admin.GenerateConsistencyTokenResponse(),
- metadata,
- )
- client.generate_consistency_token(
+ client.delete_snapshot(
request,
metadata=[
("key", "val"),
@@ -21654,18 +25749,16 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor):
)
pre.assert_called_once()
- post.assert_called_once()
- post_with_metadata.assert_called_once()
-def test_check_consistency_rest_bad_request(
- request_type=bigtable_table_admin.CheckConsistencyRequest,
+def test_create_backup_rest_bad_request(
+ request_type=bigtable_table_admin.CreateBackupRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -21680,76 +25773,164 @@ def test_check_consistency_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.check_consistency(request)
+ client.create_backup(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.CheckConsistencyRequest,
+ bigtable_table_admin.CreateBackupRequest,
dict,
],
)
-def test_check_consistency_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_create_backup_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
- # send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
+ request_init["backup"] = {
+ "name": "name_value",
+ "source_table": "source_table_value",
+ "source_backup": "source_backup_value",
+ "expire_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "size_bytes": 1089,
+ "state": 1,
+ "encryption_info": {
+ "encryption_type": 1,
+ "encryption_status": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "kms_key_version": "kms_key_version_value",
+ },
+ "backup_type": 1,
+ "hot_to_standard_time": {},
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["backup"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["backup"][field])):
+ del request_init["backup"][field][i][subfield]
+ else:
+ del request_init["backup"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.CheckConsistencyResponse(
- consistent=True,
- )
+ return_value = operations_pb2.Operation(name="operations/spam")
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.check_consistency(request)
+ response = client.create_backup(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse)
- assert response.consistent is True
+ json_return_value = json_format.MessageToJson(return_value)
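The long pruning block added to test_create_backup_rest_call_success exists because the protobuf/proto-plus versions installed at test time may lag the versions used to generate the sample request (gapic-generator-python#1748): any subfield the runtime message does not define is deleted from request_init["backup"] before the request is built. A simplified, dict-only version of the same idea, with the runtime field set supplied by hand (no repeated-field handling):

# Pairs of (field, subfield) known to the installed runtime dependency.
runtime_nested_fields = {("encryption_info", "encryption_type")}

sample = {
    "name": "name_value",
    "encryption_info": {"encryption_type": 1, "not_in_runtime": "x"},
}

for field, value in sample.items():
    if isinstance(value, dict):
        for subfield in list(value):
            if (field, subfield) not in runtime_nested_fields:
                del value[subfield]  # drop subfields unknown at runtime

assert sample["encryption_info"] == {"encryption_type": 1}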
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_check_consistency_rest_interceptors(null_interceptor):
+def test_create_backup_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_check_consistency"
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_create_backup"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor,
- "post_check_consistency_with_metadata",
+ transports.BigtableTableAdminRestInterceptor, "post_create_backup_with_metadata"
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_check_consistency"
+ transports.BigtableTableAdminRestInterceptor, "pre_create_backup"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.CheckConsistencyRequest.pb(
- bigtable_table_admin.CheckConsistencyRequest()
+ pb_message = bigtable_table_admin.CreateBackupRequest.pb(
+ bigtable_table_admin.CreateBackupRequest()
)
transcode.return_value = {
"method": "post",
@@ -21761,24 +25942,19 @@ def test_check_consistency_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = bigtable_table_admin.CheckConsistencyResponse.to_json(
- bigtable_table_admin.CheckConsistencyResponse()
- )
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
req.return_value.content = return_value
- request = bigtable_table_admin.CheckConsistencyRequest()
+ request = bigtable_table_admin.CreateBackupRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = bigtable_table_admin.CheckConsistencyResponse()
- post_with_metadata.return_value = (
- bigtable_table_admin.CheckConsistencyResponse(),
- metadata,
- )
+ post.return_value = operations_pb2.Operation()
+ post_with_metadata.return_value = operations_pb2.Operation(), metadata
- client.check_consistency(
+ client.create_backup(
request,
metadata=[
("key", "val"),
@@ -21791,14 +25967,16 @@ def test_check_consistency_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_snapshot_table_rest_bad_request(
- request_type=bigtable_table_admin.SnapshotTableRequest,
+def test_get_backup_rest_bad_request(
+ request_type=bigtable_table_admin.GetBackupRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -21813,72 +25991,87 @@ def test_snapshot_table_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.snapshot_table(request)
+ client.get_backup(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.SnapshotTableRequest,
+ bigtable_table_admin.GetBackupRequest,
dict,
],
)
-def test_snapshot_table_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_get_backup_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = operations_pb2.Operation(name="operations/spam")
+ return_value = table.Backup(
+ name="name_value",
+ source_table="source_table_value",
+ source_backup="source_backup_value",
+ size_bytes=1089,
+ state=table.Backup.State.CREATING,
+ backup_type=table.Backup.BackupType.STANDARD,
+ )
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = table.Backup.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.snapshot_table(request)
+ response = client.get_backup(request)
# Establish that the response is the type that we expect.
- json_return_value = json_format.MessageToJson(return_value)
+ assert isinstance(response, table.Backup)
+ assert response.name == "name_value"
+ assert response.source_table == "source_table_value"
+ assert response.source_backup == "source_backup_value"
+ assert response.size_bytes == 1089
+ assert response.state == table.Backup.State.CREATING
+ assert response.backup_type == table.Backup.BackupType.STANDARD
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_snapshot_table_rest_interceptors(null_interceptor):
+def test_get_backup_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- operation.Operation, "_set_result_from_operation"
- ), mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_snapshot_table"
+ transports.BigtableTableAdminRestInterceptor, "post_get_backup"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor,
- "post_snapshot_table_with_metadata",
+ transports.BigtableTableAdminRestInterceptor, "post_get_backup_with_metadata"
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table"
+ transports.BigtableTableAdminRestInterceptor, "pre_get_backup"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.SnapshotTableRequest.pb(
- bigtable_table_admin.SnapshotTableRequest()
+ pb_message = bigtable_table_admin.GetBackupRequest.pb(
+ bigtable_table_admin.GetBackupRequest()
)
transcode.return_value = {
"method": "post",
@@ -21890,19 +26083,19 @@ def test_snapshot_table_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = json_format.MessageToJson(operations_pb2.Operation())
+ return_value = table.Backup.to_json(table.Backup())
req.return_value.content = return_value
- request = bigtable_table_admin.SnapshotTableRequest()
+ request = bigtable_table_admin.GetBackupRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = operations_pb2.Operation()
- post_with_metadata.return_value = operations_pb2.Operation(), metadata
+ post.return_value = table.Backup()
+ post_with_metadata.return_value = table.Backup(), metadata
- client.snapshot_table(
+ client.get_backup(
request,
metadata=[
("key", "val"),
@@ -21915,15 +26108,17 @@ def test_snapshot_table_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_get_snapshot_rest_bad_request(
- request_type=bigtable_table_admin.GetSnapshotRequest,
+def test_update_backup_rest_bad_request(
+ request_type=bigtable_table_admin.UpdateBackupRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
request_init = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
+ "backup": {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
+ }
}
request = request_type(**request_init)
@@ -21939,35 +26134,132 @@ def test_get_snapshot_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.get_snapshot(request)
+ client.update_backup(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.GetSnapshotRequest,
+ bigtable_table_admin.UpdateBackupRequest,
dict,
],
)
-def test_get_snapshot_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_update_backup_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
request_init = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
+ "backup": {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
+ }
+ }
+ request_init["backup"] = {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4",
+ "source_table": "source_table_value",
+ "source_backup": "source_backup_value",
+ "expire_time": {"seconds": 751, "nanos": 543},
+ "start_time": {},
+ "end_time": {},
+ "size_bytes": 1089,
+ "state": 1,
+ "encryption_info": {
+ "encryption_type": 1,
+ "encryption_status": {
+ "code": 411,
+ "message": "message_value",
+ "details": [
+ {
+ "type_url": "type.googleapis.com/google.protobuf.Duration",
+ "value": b"\x08\x0c\x10\xdb\x07",
+ }
+ ],
+ },
+ "kms_key_version": "kms_key_version_value",
+ },
+ "backup_type": 1,
+ "hot_to_standard_time": {},
}
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
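+    # runtime_nested_fields holds the (field, subfield) name pairs of Backup's message-typed
+    # fields as defined by the installed library; sample-request keys missing from it are pruned below.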
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["backup"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["backup"][field])):
+ del request_init["backup"][field][i][subfield]
+ else:
+ del request_init["backup"][field][subfield]
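+    # The sample backup dict has now been pruned to fields present in the runtime dependency.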
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = table.Snapshot(
+ return_value = table.Backup(
name="name_value",
- data_size_bytes=1594,
- state=table.Snapshot.State.READY,
- description="description_value",
+ source_table="source_table_value",
+ source_backup="source_backup_value",
+ size_bytes=1089,
+ state=table.Backup.State.CREATING,
+ backup_type=table.Backup.BackupType.STANDARD,
)
# Wrap the value into a proper Response obj
@@ -21975,47 +26267,49 @@ def test_get_snapshot_rest_call_success(request_type):
response_value.status_code = 200
# Convert return value to protobuf type
- return_value = table.Snapshot.pb(return_value)
+ return_value = table.Backup.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.get_snapshot(request)
+ response = client.update_backup(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, table.Snapshot)
+ assert isinstance(response, table.Backup)
assert response.name == "name_value"
- assert response.data_size_bytes == 1594
- assert response.state == table.Snapshot.State.READY
- assert response.description == "description_value"
+ assert response.source_table == "source_table_value"
+ assert response.source_backup == "source_backup_value"
+ assert response.size_bytes == 1089
+ assert response.state == table.Backup.State.CREATING
+ assert response.backup_type == table.Backup.BackupType.STANDARD
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_get_snapshot_rest_interceptors(null_interceptor):
+def test_update_backup_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_get_snapshot"
+ transports.BigtableTableAdminRestInterceptor, "post_update_backup"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_get_snapshot_with_metadata"
+ transports.BigtableTableAdminRestInterceptor, "post_update_backup_with_metadata"
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot"
+ transports.BigtableTableAdminRestInterceptor, "pre_update_backup"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.GetSnapshotRequest.pb(
- bigtable_table_admin.GetSnapshotRequest()
+ pb_message = bigtable_table_admin.UpdateBackupRequest.pb(
+ bigtable_table_admin.UpdateBackupRequest()
)
transcode.return_value = {
"method": "post",
@@ -22027,19 +26321,19 @@ def test_get_snapshot_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = table.Snapshot.to_json(table.Snapshot())
+ return_value = table.Backup.to_json(table.Backup())
req.return_value.content = return_value
- request = bigtable_table_admin.GetSnapshotRequest()
+ request = bigtable_table_admin.UpdateBackupRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = table.Snapshot()
- post_with_metadata.return_value = table.Snapshot(), metadata
+ post.return_value = table.Backup()
+ post_with_metadata.return_value = table.Backup(), metadata
- client.get_snapshot(
+ client.update_backup(
request,
metadata=[
("key", "val"),
@@ -22052,14 +26346,16 @@ def test_get_snapshot_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_list_snapshots_rest_bad_request(
- request_type=bigtable_table_admin.ListSnapshotsRequest,
+def test_delete_backup_rest_bad_request(
+ request_type=bigtable_table_admin.DeleteBackupRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -22074,76 +26370,65 @@ def test_list_snapshots_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.list_snapshots(request)
+ client.delete_backup(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.ListSnapshotsRequest,
+ bigtable_table_admin.DeleteBackupRequest,
dict,
],
)
-def test_list_snapshots_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_delete_backup_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListSnapshotsResponse(
- next_page_token="next_page_token_value",
- )
+ return_value = None
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = ""
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.list_snapshots(request)
+ response = client.delete_backup(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListSnapshotsPager)
- assert response.next_page_token == "next_page_token_value"
+ assert response is None
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_list_snapshots_rest_interceptors(null_interceptor):
+def test_delete_backup_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_list_snapshots"
- ) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor,
- "post_list_snapshots_with_metadata",
- ) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots"
+ transports.BigtableTableAdminRestInterceptor, "pre_delete_backup"
) as pre:
pre.assert_not_called()
- post.assert_not_called()
- post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.ListSnapshotsRequest.pb(
- bigtable_table_admin.ListSnapshotsRequest()
+ pb_message = bigtable_table_admin.DeleteBackupRequest.pb(
+ bigtable_table_admin.DeleteBackupRequest()
)
transcode.return_value = {
"method": "post",
@@ -22155,24 +26440,15 @@ def test_list_snapshots_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = bigtable_table_admin.ListSnapshotsResponse.to_json(
- bigtable_table_admin.ListSnapshotsResponse()
- )
- req.return_value.content = return_value
- request = bigtable_table_admin.ListSnapshotsRequest()
+ request = bigtable_table_admin.DeleteBackupRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = bigtable_table_admin.ListSnapshotsResponse()
- post_with_metadata.return_value = (
- bigtable_table_admin.ListSnapshotsResponse(),
- metadata,
- )
- client.list_snapshots(
+ client.delete_backup(
request,
metadata=[
("key", "val"),
@@ -22181,20 +26457,16 @@ def test_list_snapshots_rest_interceptors(null_interceptor):
)
pre.assert_called_once()
- post.assert_called_once()
- post_with_metadata.assert_called_once()
-def test_delete_snapshot_rest_bad_request(
- request_type=bigtable_table_admin.DeleteSnapshotRequest,
+def test_list_backups_rest_bad_request(
+ request_type=bigtable_table_admin.ListBackupsRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
- }
+ request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -22209,65 +26481,75 @@ def test_delete_snapshot_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.delete_snapshot(request)
+ client.list_backups(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.DeleteSnapshotRequest,
+ bigtable_table_admin.ListBackupsRequest,
dict,
],
)
-def test_delete_snapshot_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_list_backups_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4"
- }
+ request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = None
+ return_value = bigtable_table_admin.ListBackupsResponse(
+ next_page_token="next_page_token_value",
+ )
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
- json_return_value = ""
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.delete_snapshot(request)
+ response = client.list_backups(request)
# Establish that the response is the type that we expect.
- assert response is None
+ assert isinstance(response, pagers.ListBackupsPager)
+ assert response.next_page_token == "next_page_token_value"
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_delete_snapshot_rest_interceptors(null_interceptor):
+def test_list_backups_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_delete_snapshot"
+ transports.BigtableTableAdminRestInterceptor, "post_list_backups"
+ ) as post, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_list_backups_with_metadata"
+ ) as post_with_metadata, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "pre_list_backups"
) as pre:
pre.assert_not_called()
- pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb(
- bigtable_table_admin.DeleteSnapshotRequest()
+ post.assert_not_called()
+ post_with_metadata.assert_not_called()
+ pb_message = bigtable_table_admin.ListBackupsRequest.pb(
+ bigtable_table_admin.ListBackupsRequest()
)
transcode.return_value = {
"method": "post",
@@ -22279,15 +26561,24 @@ def test_delete_snapshot_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = bigtable_table_admin.ListBackupsResponse.to_json(
+ bigtable_table_admin.ListBackupsResponse()
+ )
+ req.return_value.content = return_value
- request = bigtable_table_admin.DeleteSnapshotRequest()
+ request = bigtable_table_admin.ListBackupsRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
+ post.return_value = bigtable_table_admin.ListBackupsResponse()
+ post_with_metadata.return_value = (
+ bigtable_table_admin.ListBackupsResponse(),
+ metadata,
+ )
- client.delete_snapshot(
+ client.list_backups(
request,
metadata=[
("key", "val"),
@@ -22296,16 +26587,18 @@ def test_delete_snapshot_rest_interceptors(null_interceptor):
)
pre.assert_called_once()
+ post.assert_called_once()
+ post_with_metadata.assert_called_once()
-def test_create_backup_rest_bad_request(
- request_type=bigtable_table_admin.CreateBackupRequest,
+def test__restore_table_rest_bad_request(
+ request_type=bigtable_table_admin.RestoreTableRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
+ request_init = {"parent": "projects/sample1/instances/sample2"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -22320,116 +26613,23 @@ def test_create_backup_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.create_backup(request)
+ client._restore_table(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.CreateBackupRequest,
+ bigtable_table_admin.RestoreTableRequest,
dict,
],
)
-def test_create_backup_rest_call_success(request_type):
- client = BigtableTableAdminClient(
- credentials=ga_credentials.AnonymousCredentials(), transport="rest"
- )
-
- # send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
- request_init["backup"] = {
- "name": "name_value",
- "source_table": "source_table_value",
- "source_backup": "source_backup_value",
- "expire_time": {"seconds": 751, "nanos": 543},
- "start_time": {},
- "end_time": {},
- "size_bytes": 1089,
- "state": 1,
- "encryption_info": {
- "encryption_type": 1,
- "encryption_status": {
- "code": 411,
- "message": "message_value",
- "details": [
- {
- "type_url": "type.googleapis.com/google.protobuf.Duration",
- "value": b"\x08\x0c\x10\xdb\x07",
- }
- ],
- },
- "kms_key_version": "kms_key_version_value",
- },
- "backup_type": 1,
- "hot_to_standard_time": {},
- }
- # The version of a generated dependency at test runtime may differ from the version used during generation.
- # Delete any fields which are not present in the current runtime dependency
- # See https://github.com/googleapis/gapic-generator-python/issues/1748
-
- # Determine if the message type is proto-plus or protobuf
- test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"]
-
- def get_message_fields(field):
- # Given a field which is a message (composite type), return a list with
- # all the fields of the message.
- # If the field is not a composite type, return an empty list.
- message_fields = []
-
- if hasattr(field, "message") and field.message:
- is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
-
- if is_field_type_proto_plus_type:
- message_fields = field.message.meta.fields.values()
- # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
- else: # pragma: NO COVER
- message_fields = field.message.DESCRIPTOR.fields
- return message_fields
-
- runtime_nested_fields = [
- (field.name, nested_field.name)
- for field in get_message_fields(test_field)
- for nested_field in get_message_fields(field)
- ]
-
- subfields_not_in_runtime = []
-
- # For each item in the sample request, create a list of sub fields which are not present at runtime
- # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
- for field, value in request_init["backup"].items(): # pragma: NO COVER
- result = None
- is_repeated = False
- # For repeated fields
- if isinstance(value, list) and len(value):
- is_repeated = True
- result = value[0]
- # For fields where the type is another message
- if isinstance(value, dict):
- result = value
-
- if result and hasattr(result, "keys"):
- for subfield in result.keys():
- if (field, subfield) not in runtime_nested_fields:
- subfields_not_in_runtime.append(
- {
- "field": field,
- "subfield": subfield,
- "is_repeated": is_repeated,
- }
- )
-
- # Remove fields from the sample request which are not present in the runtime version of the dependency
- # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
- for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
- field = subfield_to_delete.get("field")
- field_repeated = subfield_to_delete.get("is_repeated")
- subfield = subfield_to_delete.get("subfield")
- if subfield:
- if field_repeated:
- for i in range(0, len(request_init["backup"][field])):
- del request_init["backup"][field][i][subfield]
- else:
- del request_init["backup"][field][subfield]
+def test__restore_table_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/instances/sample2"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
@@ -22444,21 +26644,21 @@ def get_message_fields(field):
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.create_backup(request)
+ response = client._restore_table(request)
# Establish that the response is the type that we expect.
json_return_value = json_format.MessageToJson(return_value)
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_create_backup_rest_interceptors(null_interceptor):
+def test__restore_table_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
@@ -22467,17 +26667,17 @@ def test_create_backup_rest_interceptors(null_interceptor):
) as transcode, mock.patch.object(
operation.Operation, "_set_result_from_operation"
), mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_create_backup"
+ transports.BigtableTableAdminRestInterceptor, "post_restore_table"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_create_backup_with_metadata"
+ transports.BigtableTableAdminRestInterceptor, "post_restore_table_with_metadata"
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_create_backup"
+ transports.BigtableTableAdminRestInterceptor, "pre_restore_table"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.CreateBackupRequest.pb(
- bigtable_table_admin.CreateBackupRequest()
+ pb_message = bigtable_table_admin.RestoreTableRequest.pb(
+ bigtable_table_admin.RestoreTableRequest()
)
transcode.return_value = {
"method": "post",
@@ -22492,7 +26692,7 @@ def test_create_backup_rest_interceptors(null_interceptor):
return_value = json_format.MessageToJson(operations_pb2.Operation())
req.return_value.content = return_value
- request = bigtable_table_admin.CreateBackupRequest()
+ request = bigtable_table_admin.RestoreTableRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
@@ -22501,7 +26701,7 @@ def test_create_backup_rest_interceptors(null_interceptor):
post.return_value = operations_pb2.Operation()
post_with_metadata.return_value = operations_pb2.Operation(), metadata
- client.create_backup(
+ client._restore_table(
request,
metadata=[
("key", "val"),
@@ -22514,16 +26714,14 @@ def test_create_backup_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_get_backup_rest_bad_request(
- request_type=bigtable_table_admin.GetBackupRequest,
+def test_copy_backup_rest_bad_request(
+ request_type=bigtable_table_admin.CopyBackupRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
- }
+ request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -22538,87 +26736,71 @@ def test_get_backup_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.get_backup(request)
+ client.copy_backup(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.GetBackupRequest,
+ bigtable_table_admin.CopyBackupRequest,
dict,
],
)
-def test_get_backup_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_copy_backup_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
- }
+ request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = table.Backup(
- name="name_value",
- source_table="source_table_value",
- source_backup="source_backup_value",
- size_bytes=1089,
- state=table.Backup.State.CREATING,
- backup_type=table.Backup.BackupType.STANDARD,
- )
+ return_value = operations_pb2.Operation(name="operations/spam")
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = table.Backup.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.get_backup(request)
+ response = client.copy_backup(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, table.Backup)
- assert response.name == "name_value"
- assert response.source_table == "source_table_value"
- assert response.source_backup == "source_backup_value"
- assert response.size_bytes == 1089
- assert response.state == table.Backup.State.CREATING
- assert response.backup_type == table.Backup.BackupType.STANDARD
+ json_return_value = json_format.MessageToJson(return_value)
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_get_backup_rest_interceptors(null_interceptor):
+def test_copy_backup_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_get_backup"
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_copy_backup"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_get_backup_with_metadata"
+ transports.BigtableTableAdminRestInterceptor, "post_copy_backup_with_metadata"
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_get_backup"
+ transports.BigtableTableAdminRestInterceptor, "pre_copy_backup"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.GetBackupRequest.pb(
- bigtable_table_admin.GetBackupRequest()
+ pb_message = bigtable_table_admin.CopyBackupRequest.pb(
+ bigtable_table_admin.CopyBackupRequest()
)
transcode.return_value = {
"method": "post",
@@ -22630,19 +26812,19 @@ def test_get_backup_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = table.Backup.to_json(table.Backup())
+ return_value = json_format.MessageToJson(operations_pb2.Operation())
req.return_value.content = return_value
- request = bigtable_table_admin.GetBackupRequest()
+ request = bigtable_table_admin.CopyBackupRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = table.Backup()
- post_with_metadata.return_value = table.Backup(), metadata
+ post.return_value = operations_pb2.Operation()
+ post_with_metadata.return_value = operations_pb2.Operation(), metadata
- client.get_backup(
+ client.copy_backup(
request,
metadata=[
("key", "val"),
@@ -22655,18 +26837,14 @@ def test_get_backup_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_update_backup_rest_bad_request(
- request_type=bigtable_table_admin.UpdateBackupRequest,
+def test_get_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.GetIamPolicyRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "backup": {
- "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
- }
- }
+ request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -22681,183 +26859,74 @@ def test_update_backup_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.update_backup(request)
+ client.get_iam_policy(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.UpdateBackupRequest,
+ iam_policy_pb2.GetIamPolicyRequest,
dict,
],
)
-def test_update_backup_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_get_iam_policy_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "backup": {
- "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
- }
- }
- request_init["backup"] = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4",
- "source_table": "source_table_value",
- "source_backup": "source_backup_value",
- "expire_time": {"seconds": 751, "nanos": 543},
- "start_time": {},
- "end_time": {},
- "size_bytes": 1089,
- "state": 1,
- "encryption_info": {
- "encryption_type": 1,
- "encryption_status": {
- "code": 411,
- "message": "message_value",
- "details": [
- {
- "type_url": "type.googleapis.com/google.protobuf.Duration",
- "value": b"\x08\x0c\x10\xdb\x07",
- }
- ],
- },
- "kms_key_version": "kms_key_version_value",
- },
- "backup_type": 1,
- "hot_to_standard_time": {},
- }
- # The version of a generated dependency at test runtime may differ from the version used during generation.
- # Delete any fields which are not present in the current runtime dependency
- # See https://github.com/googleapis/gapic-generator-python/issues/1748
-
- # Determine if the message type is proto-plus or protobuf
- test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"]
-
- def get_message_fields(field):
- # Given a field which is a message (composite type), return a list with
- # all the fields of the message.
- # If the field is not a composite type, return an empty list.
- message_fields = []
-
- if hasattr(field, "message") and field.message:
- is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
-
- if is_field_type_proto_plus_type:
- message_fields = field.message.meta.fields.values()
- # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
- else: # pragma: NO COVER
- message_fields = field.message.DESCRIPTOR.fields
- return message_fields
-
- runtime_nested_fields = [
- (field.name, nested_field.name)
- for field in get_message_fields(test_field)
- for nested_field in get_message_fields(field)
- ]
-
- subfields_not_in_runtime = []
-
- # For each item in the sample request, create a list of sub fields which are not present at runtime
- # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
- for field, value in request_init["backup"].items(): # pragma: NO COVER
- result = None
- is_repeated = False
- # For repeated fields
- if isinstance(value, list) and len(value):
- is_repeated = True
- result = value[0]
- # For fields where the type is another message
- if isinstance(value, dict):
- result = value
-
- if result and hasattr(result, "keys"):
- for subfield in result.keys():
- if (field, subfield) not in runtime_nested_fields:
- subfields_not_in_runtime.append(
- {
- "field": field,
- "subfield": subfield,
- "is_repeated": is_repeated,
- }
- )
-
- # Remove fields from the sample request which are not present in the runtime version of the dependency
- # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
- for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
- field = subfield_to_delete.get("field")
- field_repeated = subfield_to_delete.get("is_repeated")
- subfield = subfield_to_delete.get("subfield")
- if subfield:
- if field_repeated:
- for i in range(0, len(request_init["backup"][field])):
- del request_init["backup"][field][i][subfield]
- else:
- del request_init["backup"][field][subfield]
+ request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = table.Backup(
- name="name_value",
- source_table="source_table_value",
- source_backup="source_backup_value",
- size_bytes=1089,
- state=table.Backup.State.CREATING,
- backup_type=table.Backup.BackupType.STANDARD,
+ return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
)
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = table.Backup.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.update_backup(request)
+ response = client.get_iam_policy(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, table.Backup)
- assert response.name == "name_value"
- assert response.source_table == "source_table_value"
- assert response.source_backup == "source_backup_value"
- assert response.size_bytes == 1089
- assert response.state == table.Backup.State.CREATING
- assert response.backup_type == table.Backup.BackupType.STANDARD
+ assert isinstance(response, policy_pb2.Policy)
+ assert response.version == 774
+ assert response.etag == b"etag_blob"
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_update_backup_rest_interceptors(null_interceptor):
+def test_get_iam_policy_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_update_backup"
+ transports.BigtableTableAdminRestInterceptor, "post_get_iam_policy"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_update_backup_with_metadata"
+ transports.BigtableTableAdminRestInterceptor,
+ "post_get_iam_policy_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_update_backup"
+ transports.BigtableTableAdminRestInterceptor, "pre_get_iam_policy"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.UpdateBackupRequest.pb(
- bigtable_table_admin.UpdateBackupRequest()
- )
+ pb_message = iam_policy_pb2.GetIamPolicyRequest()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
@@ -22868,19 +26937,19 @@ def test_update_backup_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = table.Backup.to_json(table.Backup())
+ return_value = json_format.MessageToJson(policy_pb2.Policy())
req.return_value.content = return_value
- request = bigtable_table_admin.UpdateBackupRequest()
+ request = iam_policy_pb2.GetIamPolicyRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = table.Backup()
- post_with_metadata.return_value = table.Backup(), metadata
+ post.return_value = policy_pb2.Policy()
+ post_with_metadata.return_value = policy_pb2.Policy(), metadata
- client.update_backup(
+ client.get_iam_policy(
request,
metadata=[
("key", "val"),
@@ -22893,16 +26962,14 @@ def test_update_backup_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_delete_backup_rest_bad_request(
- request_type=bigtable_table_admin.DeleteBackupRequest,
+def test_set_iam_policy_rest_bad_request(
+ request_type=iam_policy_pb2.SetIamPolicyRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
- }
+ request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -22917,66 +26984,74 @@ def test_delete_backup_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.delete_backup(request)
+ client.set_iam_policy(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.DeleteBackupRequest,
+ iam_policy_pb2.SetIamPolicyRequest,
dict,
],
)
-def test_delete_backup_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_set_iam_policy_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
- }
+ request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = None
+ return_value = policy_pb2.Policy(
+ version=774,
+ etag=b"etag_blob",
+ )
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
- json_return_value = ""
+ json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.delete_backup(request)
+ response = client.set_iam_policy(request)
# Establish that the response is the type that we expect.
- assert response is None
+ assert isinstance(response, policy_pb2.Policy)
+ assert response.version == 774
+ assert response.etag == b"etag_blob"
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_delete_backup_rest_interceptors(null_interceptor):
+def test_set_iam_policy_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_delete_backup"
+ transports.BigtableTableAdminRestInterceptor, "post_set_iam_policy"
+ ) as post, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor,
+ "post_set_iam_policy_with_metadata",
+ ) as post_with_metadata, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "pre_set_iam_policy"
) as pre:
pre.assert_not_called()
- pb_message = bigtable_table_admin.DeleteBackupRequest.pb(
- bigtable_table_admin.DeleteBackupRequest()
- )
+ post.assert_not_called()
+ post_with_metadata.assert_not_called()
+ pb_message = iam_policy_pb2.SetIamPolicyRequest()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
@@ -22987,15 +27062,19 @@ def test_delete_backup_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+ return_value = json_format.MessageToJson(policy_pb2.Policy())
+ req.return_value.content = return_value
- request = bigtable_table_admin.DeleteBackupRequest()
+ request = iam_policy_pb2.SetIamPolicyRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
+ post.return_value = policy_pb2.Policy()
+ post_with_metadata.return_value = policy_pb2.Policy(), metadata
- client.delete_backup(
+ client.set_iam_policy(
request,
metadata=[
("key", "val"),
@@ -23004,16 +27083,18 @@ def test_delete_backup_rest_interceptors(null_interceptor):
)
pre.assert_called_once()
+ post.assert_called_once()
+ post_with_metadata.assert_called_once()
-def test_list_backups_rest_bad_request(
- request_type=bigtable_table_admin.ListBackupsRequest,
+def test_test_iam_permissions_rest_bad_request(
+ request_type=iam_policy_pb2.TestIamPermissionsRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
+ request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -23028,76 +27109,72 @@ def test_list_backups_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.list_backups(request)
+ client.test_iam_permissions(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.ListBackupsRequest,
+ iam_policy_pb2.TestIamPermissionsRequest,
dict,
],
)
-def test_list_backups_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_test_iam_permissions_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
+ request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable_table_admin.ListBackupsResponse(
- next_page_token="next_page_token_value",
+ return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=["permissions_value"],
)
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
-
- # Convert return value to protobuf type
- return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.list_backups(request)
+ response = client.test_iam_permissions(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListBackupsPager)
- assert response.next_page_token == "next_page_token_value"
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+ assert response.permissions == ["permissions_value"]
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_list_backups_rest_interceptors(null_interceptor):
+def test_test_iam_permissions_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_list_backups"
+ transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_list_backups_with_metadata"
+ transports.BigtableTableAdminRestInterceptor,
+ "post_test_iam_permissions_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_list_backups"
+ transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.ListBackupsRequest.pb(
- bigtable_table_admin.ListBackupsRequest()
- )
+ pb_message = iam_policy_pb2.TestIamPermissionsRequest()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
@@ -23108,24 +27185,24 @@ def test_list_backups_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = bigtable_table_admin.ListBackupsResponse.to_json(
- bigtable_table_admin.ListBackupsResponse()
+ return_value = json_format.MessageToJson(
+ iam_policy_pb2.TestIamPermissionsResponse()
)
req.return_value.content = return_value
- request = bigtable_table_admin.ListBackupsRequest()
+ request = iam_policy_pb2.TestIamPermissionsRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = bigtable_table_admin.ListBackupsResponse()
+ post.return_value = iam_policy_pb2.TestIamPermissionsResponse()
post_with_metadata.return_value = (
- bigtable_table_admin.ListBackupsResponse(),
+ iam_policy_pb2.TestIamPermissionsResponse(),
metadata,
)
- client.list_backups(
+ client.test_iam_permissions(
request,
metadata=[
("key", "val"),
@@ -23138,14 +27215,14 @@ def test_list_backups_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_restore_table_rest_bad_request(
- request_type=bigtable_table_admin.RestoreTableRequest,
+def test_create_schema_bundle_rest_bad_request(
+ request_type=bigtable_table_admin.CreateSchemaBundleRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2"}
+ request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -23160,23 +27237,97 @@ def test_restore_table_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.restore_table(request)
+ client.create_schema_bundle(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.RestoreTableRequest,
+ bigtable_table_admin.CreateSchemaBundleRequest,
dict,
],
)
-def test_restore_table_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_create_schema_bundle_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2"}
+ request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init["schema_bundle"] = {
+ "name": "name_value",
+ "proto_schema": {"proto_descriptors": b"proto_descriptors_blob"},
+ "etag": "etag_value",
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_table_admin.CreateSchemaBundleRequest.meta.fields[
+ "schema_bundle"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["schema_bundle"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["schema_bundle"][field])):
+ del request_init["schema_bundle"][field][i][subfield]
+ else:
+ del request_init["schema_bundle"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
@@ -23191,21 +27342,21 @@ def test_restore_table_rest_call_success(request_type):
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.restore_table(request)
+ response = client.create_schema_bundle(request)
# Establish that the response is the type that we expect.
json_return_value = json_format.MessageToJson(return_value)
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_restore_table_rest_interceptors(null_interceptor):
+def test_create_schema_bundle_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
@@ -23214,17 +27365,18 @@ def test_restore_table_rest_interceptors(null_interceptor):
) as transcode, mock.patch.object(
operation.Operation, "_set_result_from_operation"
), mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_restore_table"
+ transports.BigtableTableAdminRestInterceptor, "post_create_schema_bundle"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_restore_table_with_metadata"
+ transports.BigtableTableAdminRestInterceptor,
+ "post_create_schema_bundle_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_restore_table"
+ transports.BigtableTableAdminRestInterceptor, "pre_create_schema_bundle"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.RestoreTableRequest.pb(
- bigtable_table_admin.RestoreTableRequest()
+ pb_message = bigtable_table_admin.CreateSchemaBundleRequest.pb(
+ bigtable_table_admin.CreateSchemaBundleRequest()
)
transcode.return_value = {
"method": "post",
@@ -23239,7 +27391,7 @@ def test_restore_table_rest_interceptors(null_interceptor):
return_value = json_format.MessageToJson(operations_pb2.Operation())
req.return_value.content = return_value
- request = bigtable_table_admin.RestoreTableRequest()
+ request = bigtable_table_admin.CreateSchemaBundleRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
@@ -23248,7 +27400,7 @@ def test_restore_table_rest_interceptors(null_interceptor):
post.return_value = operations_pb2.Operation()
post_with_metadata.return_value = operations_pb2.Operation(), metadata
- client.restore_table(
+ client.create_schema_bundle(
request,
metadata=[
("key", "val"),
@@ -23261,14 +27413,18 @@ def test_restore_table_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_copy_backup_rest_bad_request(
- request_type=bigtable_table_admin.CopyBackupRequest,
+def test_update_schema_bundle_rest_bad_request(
+ request_type=bigtable_table_admin.UpdateSchemaBundleRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
+ request_init = {
+ "schema_bundle": {
+ "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4"
+ }
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -23283,23 +27439,101 @@ def test_copy_backup_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.copy_backup(request)
+ client.update_schema_bundle(request)
@pytest.mark.parametrize(
"request_type",
[
- bigtable_table_admin.CopyBackupRequest,
+ bigtable_table_admin.UpdateSchemaBundleRequest,
dict,
],
)
-def test_copy_backup_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_update_schema_bundle_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
+ request_init = {
+ "schema_bundle": {
+ "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4"
+ }
+ }
+ request_init["schema_bundle"] = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4",
+ "proto_schema": {"proto_descriptors": b"proto_descriptors_blob"},
+ "etag": "etag_value",
+ }
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_table_admin.UpdateSchemaBundleRequest.meta.fields[
+ "schema_bundle"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["schema_bundle"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["schema_bundle"][field])):
+ del request_init["schema_bundle"][field][i][subfield]
+ else:
+ del request_init["schema_bundle"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
@@ -23314,21 +27548,21 @@ def test_copy_backup_rest_call_success(request_type):
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.copy_backup(request)
+ response = client.update_schema_bundle(request)
# Establish that the response is the type that we expect.
json_return_value = json_format.MessageToJson(return_value)
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_copy_backup_rest_interceptors(null_interceptor):
+def test_update_schema_bundle_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
@@ -23337,17 +27571,18 @@ def test_copy_backup_rest_interceptors(null_interceptor):
) as transcode, mock.patch.object(
operation.Operation, "_set_result_from_operation"
), mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_copy_backup"
+ transports.BigtableTableAdminRestInterceptor, "post_update_schema_bundle"
) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_copy_backup_with_metadata"
+ transports.BigtableTableAdminRestInterceptor,
+ "post_update_schema_bundle_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_copy_backup"
+ transports.BigtableTableAdminRestInterceptor, "pre_update_schema_bundle"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = bigtable_table_admin.CopyBackupRequest.pb(
- bigtable_table_admin.CopyBackupRequest()
+ pb_message = bigtable_table_admin.UpdateSchemaBundleRequest.pb(
+ bigtable_table_admin.UpdateSchemaBundleRequest()
)
transcode.return_value = {
"method": "post",
@@ -23362,7 +27597,7 @@ def test_copy_backup_rest_interceptors(null_interceptor):
return_value = json_format.MessageToJson(operations_pb2.Operation())
req.return_value.content = return_value
- request = bigtable_table_admin.CopyBackupRequest()
+ request = bigtable_table_admin.UpdateSchemaBundleRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
@@ -23371,7 +27606,7 @@ def test_copy_backup_rest_interceptors(null_interceptor):
post.return_value = operations_pb2.Operation()
post_with_metadata.return_value = operations_pb2.Operation(), metadata
- client.copy_backup(
+ client.update_schema_bundle(
request,
metadata=[
("key", "val"),
@@ -23384,14 +27619,16 @@ def test_copy_backup_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_get_iam_policy_rest_bad_request(
- request_type=iam_policy_pb2.GetIamPolicyRequest,
+def test_get_schema_bundle_rest_bad_request(
+ request_type=bigtable_table_admin.GetSchemaBundleRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -23406,74 +27643,81 @@ def test_get_iam_policy_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.get_iam_policy(request)
+ client.get_schema_bundle(request)
@pytest.mark.parametrize(
"request_type",
[
- iam_policy_pb2.GetIamPolicyRequest,
+ bigtable_table_admin.GetSchemaBundleRequest,
dict,
],
)
-def test_get_iam_policy_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_get_schema_bundle_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = policy_pb2.Policy(
- version=774,
- etag=b"etag_blob",
+ return_value = table.SchemaBundle(
+ name="name_value",
+ etag="etag_value",
)
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = table.SchemaBundle.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.get_iam_policy(request)
+ response = client.get_schema_bundle(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, policy_pb2.Policy)
- assert response.version == 774
- assert response.etag == b"etag_blob"
+ assert isinstance(response, table.SchemaBundle)
+ assert response.name == "name_value"
+ assert response.etag == "etag_value"
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_get_iam_policy_rest_interceptors(null_interceptor):
+def test_get_schema_bundle_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_get_iam_policy"
+ transports.BigtableTableAdminRestInterceptor, "post_get_schema_bundle"
) as post, mock.patch.object(
transports.BigtableTableAdminRestInterceptor,
- "post_get_iam_policy_with_metadata",
+ "post_get_schema_bundle_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_get_iam_policy"
+ transports.BigtableTableAdminRestInterceptor, "pre_get_schema_bundle"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = iam_policy_pb2.GetIamPolicyRequest()
+ pb_message = bigtable_table_admin.GetSchemaBundleRequest.pb(
+ bigtable_table_admin.GetSchemaBundleRequest()
+ )
transcode.return_value = {
"method": "post",
"uri": "my_uri",
@@ -23484,19 +27728,19 @@ def test_get_iam_policy_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = json_format.MessageToJson(policy_pb2.Policy())
+ return_value = table.SchemaBundle.to_json(table.SchemaBundle())
req.return_value.content = return_value
- request = iam_policy_pb2.GetIamPolicyRequest()
+ request = bigtable_table_admin.GetSchemaBundleRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = policy_pb2.Policy()
- post_with_metadata.return_value = policy_pb2.Policy(), metadata
+ post.return_value = table.SchemaBundle()
+ post_with_metadata.return_value = table.SchemaBundle(), metadata
- client.get_iam_policy(
+ client.get_schema_bundle(
request,
metadata=[
("key", "val"),
@@ -23509,14 +27753,14 @@ def test_get_iam_policy_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_set_iam_policy_rest_bad_request(
- request_type=iam_policy_pb2.SetIamPolicyRequest,
+def test_list_schema_bundles_rest_bad_request(
+ request_type=bigtable_table_admin.ListSchemaBundlesRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -23531,74 +27775,77 @@ def test_set_iam_policy_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.set_iam_policy(request)
+ client.list_schema_bundles(request)
@pytest.mark.parametrize(
"request_type",
[
- iam_policy_pb2.SetIamPolicyRequest,
+ bigtable_table_admin.ListSchemaBundlesRequest,
dict,
],
)
-def test_set_iam_policy_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_list_schema_bundles_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"}
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = policy_pb2.Policy(
- version=774,
- etag=b"etag_blob",
+ return_value = bigtable_table_admin.ListSchemaBundlesResponse(
+ next_page_token="next_page_token_value",
)
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.set_iam_policy(request)
+ response = client.list_schema_bundles(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, policy_pb2.Policy)
- assert response.version == 774
- assert response.etag == b"etag_blob"
+ assert isinstance(response, pagers.ListSchemaBundlesPager)
+ assert response.next_page_token == "next_page_token_value"
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_set_iam_policy_rest_interceptors(null_interceptor):
+def test_list_schema_bundles_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_set_iam_policy"
+ transports.BigtableTableAdminRestInterceptor, "post_list_schema_bundles"
) as post, mock.patch.object(
transports.BigtableTableAdminRestInterceptor,
- "post_set_iam_policy_with_metadata",
+ "post_list_schema_bundles_with_metadata",
) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_set_iam_policy"
+ transports.BigtableTableAdminRestInterceptor, "pre_list_schema_bundles"
) as pre:
pre.assert_not_called()
post.assert_not_called()
post_with_metadata.assert_not_called()
- pb_message = iam_policy_pb2.SetIamPolicyRequest()
+ pb_message = bigtable_table_admin.ListSchemaBundlesRequest.pb(
+ bigtable_table_admin.ListSchemaBundlesRequest()
+ )
transcode.return_value = {
"method": "post",
"uri": "my_uri",
@@ -23609,19 +27856,24 @@ def test_set_iam_policy_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = json_format.MessageToJson(policy_pb2.Policy())
+ return_value = bigtable_table_admin.ListSchemaBundlesResponse.to_json(
+ bigtable_table_admin.ListSchemaBundlesResponse()
+ )
req.return_value.content = return_value
- request = iam_policy_pb2.SetIamPolicyRequest()
+ request = bigtable_table_admin.ListSchemaBundlesRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = policy_pb2.Policy()
- post_with_metadata.return_value = policy_pb2.Policy(), metadata
+ post.return_value = bigtable_table_admin.ListSchemaBundlesResponse()
+ post_with_metadata.return_value = (
+ bigtable_table_admin.ListSchemaBundlesResponse(),
+ metadata,
+ )
- client.set_iam_policy(
+ client.list_schema_bundles(
request,
metadata=[
("key", "val"),
@@ -23634,14 +27886,16 @@ def test_set_iam_policy_rest_interceptors(null_interceptor):
post_with_metadata.assert_called_once()
-def test_test_iam_permissions_rest_bad_request(
- request_type=iam_policy_pb2.TestIamPermissionsRequest,
+def test_delete_schema_bundle_rest_bad_request(
+ request_type=bigtable_table_admin.DeleteSchemaBundleRequest,
):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -23656,72 +27910,66 @@ def test_test_iam_permissions_rest_bad_request(
response_value.request = mock.Mock()
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- client.test_iam_permissions(request)
+ client.delete_schema_bundle(request)
@pytest.mark.parametrize(
"request_type",
[
- iam_policy_pb2.TestIamPermissionsRequest,
+ bigtable_table_admin.DeleteSchemaBundleRequest,
dict,
],
)
-def test_test_iam_permissions_rest_call_success(request_type):
- client = BigtableTableAdminClient(
+def test_delete_schema_bundle_rest_call_success(request_type):
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
# send a request that will satisfy transcoding
- request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"}
+ request_init = {
+ "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4"
+ }
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = iam_policy_pb2.TestIamPermissionsResponse(
- permissions=["permissions_value"],
- )
+ return_value = None
# Wrap the value into a proper Response obj
response_value = mock.Mock()
response_value.status_code = 200
- json_return_value = json_format.MessageToJson(return_value)
+ json_return_value = ""
response_value.content = json_return_value.encode("UTF-8")
req.return_value = response_value
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- response = client.test_iam_permissions(request)
+ response = client.delete_schema_bundle(request)
# Establish that the response is the type that we expect.
- assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
- assert response.permissions == ["permissions_value"]
+ assert response is None
@pytest.mark.parametrize("null_interceptor", [True, False])
-def test_test_iam_permissions_rest_interceptors(null_interceptor):
+def test_delete_schema_bundle_rest_interceptors(null_interceptor):
transport = transports.BigtableTableAdminRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.BigtableTableAdminRestInterceptor(),
)
- client = BigtableTableAdminClient(transport=transport)
+ client = BaseBigtableTableAdminClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions"
- ) as post, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor,
- "post_test_iam_permissions_with_metadata",
- ) as post_with_metadata, mock.patch.object(
- transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions"
+ transports.BigtableTableAdminRestInterceptor, "pre_delete_schema_bundle"
) as pre:
pre.assert_not_called()
- post.assert_not_called()
- post_with_metadata.assert_not_called()
- pb_message = iam_policy_pb2.TestIamPermissionsRequest()
+ pb_message = bigtable_table_admin.DeleteSchemaBundleRequest.pb(
+ bigtable_table_admin.DeleteSchemaBundleRequest()
+ )
transcode.return_value = {
"method": "post",
"uri": "my_uri",
@@ -23732,24 +27980,15 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor):
req.return_value = mock.Mock()
req.return_value.status_code = 200
req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
- return_value = json_format.MessageToJson(
- iam_policy_pb2.TestIamPermissionsResponse()
- )
- req.return_value.content = return_value
- request = iam_policy_pb2.TestIamPermissionsRequest()
+ request = bigtable_table_admin.DeleteSchemaBundleRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
- post.return_value = iam_policy_pb2.TestIamPermissionsResponse()
- post_with_metadata.return_value = (
- iam_policy_pb2.TestIamPermissionsResponse(),
- metadata,
- )
- client.test_iam_permissions(
+ client.delete_schema_bundle(
request,
metadata=[
("key", "val"),
@@ -23758,12 +27997,10 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor):
)
pre.assert_called_once()
- post.assert_called_once()
- post_with_metadata.assert_called_once()
def test_initialize_client_w_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
assert client is not None
@@ -23772,7 +28009,7 @@ def test_initialize_client_w_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_create_table_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -23792,7 +28029,7 @@ def test_create_table_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_create_table_from_snapshot_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -23814,7 +28051,7 @@ def test_create_table_from_snapshot_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_list_tables_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -23834,7 +28071,7 @@ def test_list_tables_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_get_table_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -23854,7 +28091,7 @@ def test_get_table_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_update_table_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -23874,7 +28111,7 @@ def test_update_table_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_delete_table_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -23894,7 +28131,7 @@ def test_delete_table_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_undelete_table_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -23914,7 +28151,7 @@ def test_undelete_table_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_create_authorized_view_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -23936,7 +28173,7 @@ def test_create_authorized_view_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_list_authorized_views_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -23958,7 +28195,7 @@ def test_list_authorized_views_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_get_authorized_view_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -23980,7 +28217,7 @@ def test_get_authorized_view_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_update_authorized_view_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24002,7 +28239,7 @@ def test_update_authorized_view_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_delete_authorized_view_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24024,7 +28261,7 @@ def test_delete_authorized_view_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_modify_column_families_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24046,7 +28283,7 @@ def test_modify_column_families_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_drop_row_range_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24066,7 +28303,7 @@ def test_drop_row_range_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_generate_consistency_token_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24088,7 +28325,7 @@ def test_generate_consistency_token_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_check_consistency_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24110,7 +28347,7 @@ def test_check_consistency_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_snapshot_table_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24130,7 +28367,7 @@ def test_snapshot_table_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_get_snapshot_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24150,7 +28387,7 @@ def test_get_snapshot_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_list_snapshots_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24170,7 +28407,7 @@ def test_list_snapshots_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_delete_snapshot_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24190,7 +28427,7 @@ def test_delete_snapshot_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_create_backup_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24210,7 +28447,7 @@ def test_create_backup_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_get_backup_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24230,7 +28467,7 @@ def test_get_backup_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_update_backup_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24250,7 +28487,7 @@ def test_update_backup_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_delete_backup_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24270,7 +28507,7 @@ def test_delete_backup_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_list_backups_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24289,15 +28526,15 @@ def test_list_backups_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
-def test_restore_table_empty_call_rest():
- client = BigtableTableAdminClient(
+def test__restore_table_empty_call_rest():
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
# Mock the actual call, and fake the request.
with mock.patch.object(type(client.transport.restore_table), "__call__") as call:
- client.restore_table(request=None)
+ client._restore_table(request=None)
# Establish that the underlying stub method was called.
call.assert_called()
@@ -24310,7 +28547,7 @@ def test_restore_table_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_copy_backup_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24330,7 +28567,7 @@ def test_copy_backup_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_get_iam_policy_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24350,7 +28587,7 @@ def test_get_iam_policy_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_set_iam_policy_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24370,7 +28607,7 @@ def test_set_iam_policy_empty_call_rest():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
def test_test_iam_permissions_empty_call_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24389,8 +28626,118 @@ def test_test_iam_permissions_empty_call_rest():
assert args[0] == request_msg
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_create_schema_bundle_empty_call_rest():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_schema_bundle), "__call__"
+ ) as call:
+ client.create_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.CreateSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_update_schema_bundle_empty_call_rest():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_schema_bundle), "__call__"
+ ) as call:
+ client.update_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.UpdateSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_get_schema_bundle_empty_call_rest():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_schema_bundle), "__call__"
+ ) as call:
+ client.get_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.GetSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_list_schema_bundles_empty_call_rest():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_schema_bundles), "__call__"
+ ) as call:
+ client.list_schema_bundles(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.ListSchemaBundlesRequest()
+
+ assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+def test_delete_schema_bundle_empty_call_rest():
+ client = BaseBigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_schema_bundle), "__call__"
+ ) as call:
+ client.delete_schema_bundle(request=None)
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ request_msg = bigtable_table_admin.DeleteSchemaBundleRequest()
+
+ assert args[0] == request_msg
+
+
def test_bigtable_table_admin_rest_lro_client():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="rest",
)
@@ -24408,7 +28755,7 @@ def test_bigtable_table_admin_rest_lro_client():
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
@@ -24469,6 +28816,11 @@ def test_bigtable_table_admin_base_transport():
"get_iam_policy",
"set_iam_policy",
"test_iam_permissions",
+ "create_schema_bundle",
+ "update_schema_bundle",
+ "get_schema_bundle",
+ "list_schema_bundles",
+ "delete_schema_bundle",
)
for method in methods:
with pytest.raises(NotImplementedError):
@@ -24534,7 +28886,7 @@ def test_bigtable_table_admin_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
- BigtableTableAdminClient()
+ BaseBigtableTableAdminClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
@@ -24708,7 +29060,7 @@ def test_bigtable_table_admin_http_transport_client_cert_source_for_mtls():
],
)
def test_bigtable_table_admin_host_no_port(transport_name):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="bigtableadmin.googleapis.com"
@@ -24731,7 +29083,7 @@ def test_bigtable_table_admin_host_no_port(transport_name):
],
)
def test_bigtable_table_admin_host_with_port(transport_name):
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="bigtableadmin.googleapis.com:8000"
@@ -24754,11 +29106,11 @@ def test_bigtable_table_admin_host_with_port(transport_name):
def test_bigtable_table_admin_client_transport_session_collision(transport_name):
creds1 = ga_credentials.AnonymousCredentials()
creds2 = ga_credentials.AnonymousCredentials()
- client1 = BigtableTableAdminClient(
+ client1 = BaseBigtableTableAdminClient(
credentials=creds1,
transport=transport_name,
)
- client2 = BigtableTableAdminClient(
+ client2 = BaseBigtableTableAdminClient(
credentials=creds2,
transport=transport_name,
)
@@ -24852,6 +29204,21 @@ def test_bigtable_table_admin_client_transport_session_collision(transport_name)
session1 = client1.transport.test_iam_permissions._session
session2 = client2.transport.test_iam_permissions._session
assert session1 != session2
+ session1 = client1.transport.create_schema_bundle._session
+ session2 = client2.transport.create_schema_bundle._session
+ assert session1 != session2
+ session1 = client1.transport.update_schema_bundle._session
+ session2 = client2.transport.update_schema_bundle._session
+ assert session1 != session2
+ session1 = client1.transport.get_schema_bundle._session
+ session2 = client2.transport.get_schema_bundle._session
+ assert session1 != session2
+ session1 = client1.transport.list_schema_bundles._session
+ session2 = client2.transport.list_schema_bundles._session
+ assert session1 != session2
+ session1 = client1.transport.delete_schema_bundle._session
+ session2 = client2.transport.delete_schema_bundle._session
+ assert session1 != session2
def test_bigtable_table_admin_grpc_transport_channel():
@@ -24882,6 +29249,7 @@ def test_bigtable_table_admin_grpc_asyncio_transport_channel():
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize(
"transport_class",
[
@@ -24981,7 +29349,7 @@ def test_bigtable_table_admin_transport_channel_mtls_with_adc(transport_class):
def test_bigtable_table_admin_grpc_lro_client():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
@@ -24998,7 +29366,7 @@ def test_bigtable_table_admin_grpc_lro_client():
def test_bigtable_table_admin_grpc_lro_async_client():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc_asyncio",
)
@@ -25025,7 +29393,7 @@ def test_authorized_view_path():
table=table,
authorized_view=authorized_view,
)
- actual = BigtableTableAdminClient.authorized_view_path(
+ actual = BaseBigtableTableAdminClient.authorized_view_path(
project, instance, table, authorized_view
)
assert expected == actual
@@ -25038,10 +29406,10 @@ def test_parse_authorized_view_path():
"table": "cuttlefish",
"authorized_view": "mussel",
}
- path = BigtableTableAdminClient.authorized_view_path(**expected)
+ path = BaseBigtableTableAdminClient.authorized_view_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_authorized_view_path(path)
+ actual = BaseBigtableTableAdminClient.parse_authorized_view_path(path)
assert expected == actual
@@ -25056,7 +29424,9 @@ def test_backup_path():
cluster=cluster,
backup=backup,
)
- actual = BigtableTableAdminClient.backup_path(project, instance, cluster, backup)
+ actual = BaseBigtableTableAdminClient.backup_path(
+ project, instance, cluster, backup
+ )
assert expected == actual
@@ -25067,10 +29437,10 @@ def test_parse_backup_path():
"cluster": "whelk",
"backup": "octopus",
}
- path = BigtableTableAdminClient.backup_path(**expected)
+ path = BaseBigtableTableAdminClient.backup_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_backup_path(path)
+ actual = BaseBigtableTableAdminClient.parse_backup_path(path)
assert expected == actual
@@ -25083,7 +29453,7 @@ def test_cluster_path():
instance=instance,
cluster=cluster,
)
- actual = BigtableTableAdminClient.cluster_path(project, instance, cluster)
+ actual = BaseBigtableTableAdminClient.cluster_path(project, instance, cluster)
assert expected == actual
@@ -25093,10 +29463,10 @@ def test_parse_cluster_path():
"instance": "winkle",
"cluster": "nautilus",
}
- path = BigtableTableAdminClient.cluster_path(**expected)
+ path = BaseBigtableTableAdminClient.cluster_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_cluster_path(path)
+ actual = BaseBigtableTableAdminClient.parse_cluster_path(path)
assert expected == actual
@@ -25113,7 +29483,7 @@ def test_crypto_key_version_path():
crypto_key=crypto_key,
crypto_key_version=crypto_key_version,
)
- actual = BigtableTableAdminClient.crypto_key_version_path(
+ actual = BaseBigtableTableAdminClient.crypto_key_version_path(
project, location, key_ring, crypto_key, crypto_key_version
)
assert expected == actual
@@ -25127,10 +29497,10 @@ def test_parse_crypto_key_version_path():
"crypto_key": "cuttlefish",
"crypto_key_version": "mussel",
}
- path = BigtableTableAdminClient.crypto_key_version_path(**expected)
+ path = BaseBigtableTableAdminClient.crypto_key_version_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_crypto_key_version_path(path)
+ actual = BaseBigtableTableAdminClient.parse_crypto_key_version_path(path)
assert expected == actual
@@ -25141,7 +29511,7 @@ def test_instance_path():
project=project,
instance=instance,
)
- actual = BigtableTableAdminClient.instance_path(project, instance)
+ actual = BaseBigtableTableAdminClient.instance_path(project, instance)
assert expected == actual
@@ -25150,25 +29520,56 @@ def test_parse_instance_path():
"project": "scallop",
"instance": "abalone",
}
- path = BigtableTableAdminClient.instance_path(**expected)
+ path = BaseBigtableTableAdminClient.instance_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_instance_path(path)
+ actual = BaseBigtableTableAdminClient.parse_instance_path(path)
assert expected == actual
-def test_snapshot_path():
+def test_schema_bundle_path():
project = "squid"
instance = "clam"
- cluster = "whelk"
- snapshot = "octopus"
+ table = "whelk"
+ schema_bundle = "octopus"
+ expected = "projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}".format(
+ project=project,
+ instance=instance,
+ table=table,
+ schema_bundle=schema_bundle,
+ )
+ actual = BaseBigtableTableAdminClient.schema_bundle_path(
+ project, instance, table, schema_bundle
+ )
+ assert expected == actual
+
+
+def test_parse_schema_bundle_path():
+ expected = {
+ "project": "oyster",
+ "instance": "nudibranch",
+ "table": "cuttlefish",
+ "schema_bundle": "mussel",
+ }
+ path = BaseBigtableTableAdminClient.schema_bundle_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = BaseBigtableTableAdminClient.parse_schema_bundle_path(path)
+ assert expected == actual
+
+
+def test_snapshot_path():
+ project = "winkle"
+ instance = "nautilus"
+ cluster = "scallop"
+ snapshot = "abalone"
expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format(
project=project,
instance=instance,
cluster=cluster,
snapshot=snapshot,
)
- actual = BigtableTableAdminClient.snapshot_path(
+ actual = BaseBigtableTableAdminClient.snapshot_path(
project, instance, cluster, snapshot
)
assert expected == actual
@@ -25176,144 +29577,144 @@ def test_snapshot_path():
def test_parse_snapshot_path():
expected = {
- "project": "oyster",
- "instance": "nudibranch",
- "cluster": "cuttlefish",
- "snapshot": "mussel",
+ "project": "squid",
+ "instance": "clam",
+ "cluster": "whelk",
+ "snapshot": "octopus",
}
- path = BigtableTableAdminClient.snapshot_path(**expected)
+ path = BaseBigtableTableAdminClient.snapshot_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_snapshot_path(path)
+ actual = BaseBigtableTableAdminClient.parse_snapshot_path(path)
assert expected == actual
def test_table_path():
- project = "winkle"
- instance = "nautilus"
- table = "scallop"
+ project = "oyster"
+ instance = "nudibranch"
+ table = "cuttlefish"
expected = "projects/{project}/instances/{instance}/tables/{table}".format(
project=project,
instance=instance,
table=table,
)
- actual = BigtableTableAdminClient.table_path(project, instance, table)
+ actual = BaseBigtableTableAdminClient.table_path(project, instance, table)
assert expected == actual
def test_parse_table_path():
expected = {
- "project": "abalone",
- "instance": "squid",
- "table": "clam",
+ "project": "mussel",
+ "instance": "winkle",
+ "table": "nautilus",
}
- path = BigtableTableAdminClient.table_path(**expected)
+ path = BaseBigtableTableAdminClient.table_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_table_path(path)
+ actual = BaseBigtableTableAdminClient.parse_table_path(path)
assert expected == actual
def test_common_billing_account_path():
- billing_account = "whelk"
+ billing_account = "scallop"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
- actual = BigtableTableAdminClient.common_billing_account_path(billing_account)
+ actual = BaseBigtableTableAdminClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
- "billing_account": "octopus",
+ "billing_account": "abalone",
}
- path = BigtableTableAdminClient.common_billing_account_path(**expected)
+ path = BaseBigtableTableAdminClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_common_billing_account_path(path)
+ actual = BaseBigtableTableAdminClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
- folder = "oyster"
+ folder = "squid"
expected = "folders/{folder}".format(
folder=folder,
)
- actual = BigtableTableAdminClient.common_folder_path(folder)
+ actual = BaseBigtableTableAdminClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
- "folder": "nudibranch",
+ "folder": "clam",
}
- path = BigtableTableAdminClient.common_folder_path(**expected)
+ path = BaseBigtableTableAdminClient.common_folder_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_common_folder_path(path)
+ actual = BaseBigtableTableAdminClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
- organization = "cuttlefish"
+ organization = "whelk"
expected = "organizations/{organization}".format(
organization=organization,
)
- actual = BigtableTableAdminClient.common_organization_path(organization)
+ actual = BaseBigtableTableAdminClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
- "organization": "mussel",
+ "organization": "octopus",
}
- path = BigtableTableAdminClient.common_organization_path(**expected)
+ path = BaseBigtableTableAdminClient.common_organization_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_common_organization_path(path)
+ actual = BaseBigtableTableAdminClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
- project = "winkle"
+ project = "oyster"
expected = "projects/{project}".format(
project=project,
)
- actual = BigtableTableAdminClient.common_project_path(project)
+ actual = BaseBigtableTableAdminClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
- "project": "nautilus",
+ "project": "nudibranch",
}
- path = BigtableTableAdminClient.common_project_path(**expected)
+ path = BaseBigtableTableAdminClient.common_project_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_common_project_path(path)
+ actual = BaseBigtableTableAdminClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
- project = "scallop"
- location = "abalone"
+ project = "cuttlefish"
+ location = "mussel"
expected = "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
- actual = BigtableTableAdminClient.common_location_path(project, location)
+ actual = BaseBigtableTableAdminClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
- "project": "squid",
- "location": "clam",
+ "project": "winkle",
+ "location": "nautilus",
}
- path = BigtableTableAdminClient.common_location_path(**expected)
+ path = BaseBigtableTableAdminClient.common_location_path(**expected)
# Check that the path construction is reversible.
- actual = BigtableTableAdminClient.parse_common_location_path(path)
+ actual = BaseBigtableTableAdminClient.parse_common_location_path(path)
assert expected == actual
@@ -25323,7 +29724,7 @@ def test_client_with_default_client_info():
with mock.patch.object(
transports.BigtableTableAdminTransport, "_prep_wrapped_messages"
) as prep:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
@@ -25332,7 +29733,7 @@ def test_client_with_default_client_info():
with mock.patch.object(
transports.BigtableTableAdminTransport, "_prep_wrapped_messages"
) as prep:
- transport_class = BigtableTableAdminClient.get_transport_class()
+ transport_class = BaseBigtableTableAdminClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
@@ -25341,7 +29742,7 @@ def test_client_with_default_client_info():
def test_transport_close_grpc():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
)
with mock.patch.object(
@@ -25354,7 +29755,7 @@ def test_transport_close_grpc():
@pytest.mark.asyncio
async def test_transport_close_grpc_asyncio():
- client = BigtableTableAdminAsyncClient(
+ client = BaseBigtableTableAdminAsyncClient(
credentials=async_anonymous_credentials(), transport="grpc_asyncio"
)
with mock.patch.object(
@@ -25366,7 +29767,7 @@ async def test_transport_close_grpc_asyncio():
def test_transport_close_rest():
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
with mock.patch.object(
@@ -25383,7 +29784,7 @@ def test_client_ctx():
"grpc",
]
for transport in transports:
- client = BigtableTableAdminClient(
+ client = BaseBigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
@@ -25397,9 +29798,9 @@ def test_client_ctx():
@pytest.mark.parametrize(
"client_class,transport_class",
[
- (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport),
+ (BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport),
(
- BigtableTableAdminAsyncClient,
+ BaseBigtableTableAdminAsyncClient,
transports.BigtableTableAdminGrpcAsyncIOTransport,
),
],
diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py
index 84093a926..ea7f0955d 100644
--- a/tests/unit/gapic/bigtable_v2/test_bigtable.py
+++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py
@@ -151,12 +151,19 @@ def test__read_environment_variables():
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
- with pytest.raises(ValueError) as excinfo:
- BigtableClient._read_environment_variables()
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with pytest.raises(ValueError) as excinfo:
+ BigtableClient._read_environment_variables()
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ else:
+ assert BigtableClient._read_environment_variables() == (
+ False,
+ "auto",
+ None,
+ )
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
assert BigtableClient._read_environment_variables() == (False, "never", None)
@@ -183,6 +190,105 @@ def test__read_environment_variables():
)
+def test_use_client_cert_effective():
+ # Test case 1: Test when `should_use_client_cert` returns True.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=True
+ ):
+ assert BigtableClient._use_client_cert_effective() is True
+
+ # Test case 2: Test when `should_use_client_cert` returns False.
+ # We mock the `should_use_client_cert` function to simulate a scenario where
+ # the google-auth library supports automatic mTLS and determines that a
+ # client certificate should NOT be used.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch(
+ "google.auth.transport.mtls.should_use_client_cert", return_value=False
+ ):
+ assert BigtableClient._use_client_cert_effective() is False
+
+ # Test case 3: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "true".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ assert BigtableClient._use_client_cert_effective() is True
+
+ # Test case 4: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}
+ ):
+ assert BigtableClient._use_client_cert_effective() is False
+
+ # Test case 5: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "True".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "True"}):
+ assert BigtableClient._use_client_cert_effective() is True
+
+ # Test case 6: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "False".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "False"}
+ ):
+ assert BigtableClient._use_client_cert_effective() is False
+
+ # Test case 7: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "TRUE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "TRUE"}):
+ assert BigtableClient._use_client_cert_effective() is True
+
+ # Test case 8: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "FALSE".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "FALSE"}
+ ):
+ assert BigtableClient._use_client_cert_effective() is False
+
+ # Test case 9: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not set.
+ # In this case, the method should return False, which is the default value.
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, clear=True):
+ assert BigtableClient._use_client_cert_effective() is False
+
+ # Test case 10: Test when `should_use_client_cert` is unavailable and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should raise a ValueError as the environment variable must be either
+ # "true" or "false".
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ with pytest.raises(ValueError):
+ BigtableClient._use_client_cert_effective()
+
+ # Test case 11: Test when `should_use_client_cert` is available and the
+ # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+ # The method should return False as the environment variable is set to an invalid value.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+ ):
+ assert BigtableClient._use_client_cert_effective() is False
+
+    # Test case 12: Test when `should_use_client_cert` is available and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an empty
+    # string. The `GOOGLE_API_CERTIFICATE_CONFIG` environment variable is also empty.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": ""}):
+ with mock.patch.dict(os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": ""}):
+ assert BigtableClient._use_client_cert_effective() is False
+
+
def test__get_client_cert_source():
mock_provided_cert_source = mock.Mock()
mock_default_cert_source = mock.Mock()
@@ -539,17 +645,6 @@ def test_bigtable_client_client_options(client_class, transport_class, transport
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client = client_class(transport=transport_name)
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
@@ -761,6 +856,119 @@ def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class):
assert api_endpoint == mock_api_endpoint
assert cert_source is None
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "Unsupported".
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset.
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", None)
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
+    # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset (empty).
+ test_cases = [
+ (
+ # With workloads present in config, mTLS is enabled.
+ {
+ "version": 1,
+ "cert_configs": {
+ "workload": {
+ "cert_path": "path/to/cert/file",
+ "key_path": "path/to/key/file",
+ }
+ },
+ },
+ mock_client_cert_source,
+ ),
+ (
+ # With workloads not present in config, mTLS is disabled.
+ {
+ "version": 1,
+ "cert_configs": {},
+ },
+ None,
+ ),
+ ]
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+ for config_data, expected_cert_source in test_cases:
+ env = os.environ.copy()
+ env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", "")
+ with mock.patch.dict(os.environ, env, clear=True):
+ config_filename = "mock_certificate_config.json"
+ config_file_content = json.dumps(config_data)
+ m = mock.mock_open(read_data=config_file_content)
+ with mock.patch("builtins.open", m):
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename}
+ ):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source,
+ api_endpoint=mock_api_endpoint,
+ )
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source(options)
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is expected_cert_source
+
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
@@ -811,18 +1019,6 @@ def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class):
== "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(
- os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
- ):
- with pytest.raises(ValueError) as excinfo:
- client_class.get_mtls_endpoint_and_cert_source()
-
- assert (
- str(excinfo.value)
- == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
- )
-
@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient])
@mock.patch.object(
@@ -6854,9 +7050,11 @@ def test_read_rows_routing_parameters_request_1_grpc():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_read_rows_routing_parameters_request_2_grpc():
@@ -6878,9 +7076,11 @@ def test_read_rows_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_read_rows_routing_parameters_request_3_grpc():
@@ -6894,7 +7094,7 @@ def test_read_rows_routing_parameters_request_3_grpc():
call.return_value = iter([bigtable.ReadRowsResponse()])
client.read_rows(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -6903,19 +7103,56 @@ def test_read_rows_routing_parameters_request_3_grpc():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.ReadRowsRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+def test_read_rows_routing_parameters_request_4_grpc():
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call:
+ call.return_value = iter([bigtable.ReadRowsResponse()])
+ client.read_rows(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
+ }
+ )
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.ReadRowsRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+
+ assert args[0] == request_msg
+
+ expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_sample_row_keys_routing_parameters_request_1_grpc():
@@ -6944,9 +7181,11 @@ def test_sample_row_keys_routing_parameters_request_1_grpc():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_sample_row_keys_routing_parameters_request_2_grpc():
@@ -6968,9 +7207,11 @@ def test_sample_row_keys_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_sample_row_keys_routing_parameters_request_3_grpc():
@@ -6984,7 +7225,7 @@ def test_sample_row_keys_routing_parameters_request_3_grpc():
call.return_value = iter([bigtable.SampleRowKeysResponse()])
client.sample_row_keys(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -6993,19 +7234,56 @@ def test_sample_row_keys_routing_parameters_request_3_grpc():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.SampleRowKeysRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+def test_sample_row_keys_routing_parameters_request_4_grpc():
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call:
+ call.return_value = iter([bigtable.SampleRowKeysResponse()])
+ client.sample_row_keys(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
+ }
+ )
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.SampleRowKeysRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+
+ assert args[0] == request_msg
+
+ expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_row_routing_parameters_request_1_grpc():
@@ -7034,9 +7312,11 @@ def test_mutate_row_routing_parameters_request_1_grpc():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_row_routing_parameters_request_2_grpc():
@@ -7058,9 +7338,11 @@ def test_mutate_row_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_row_routing_parameters_request_3_grpc():
@@ -7074,7 +7356,7 @@ def test_mutate_row_routing_parameters_request_3_grpc():
call.return_value = bigtable.MutateRowResponse()
client.mutate_row(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -7083,19 +7365,21 @@ def test_mutate_row_routing_parameters_request_3_grpc():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.MutateRowRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_rows_routing_parameters_request_1_grpc():
@@ -7124,9 +7408,11 @@ def test_mutate_rows_routing_parameters_request_1_grpc():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_rows_routing_parameters_request_2_grpc():
@@ -7148,9 +7434,11 @@ def test_mutate_rows_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_rows_routing_parameters_request_3_grpc():
@@ -7164,7 +7452,7 @@ def test_mutate_rows_routing_parameters_request_3_grpc():
call.return_value = iter([bigtable.MutateRowsResponse()])
client.mutate_rows(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -7173,19 +7461,21 @@ def test_mutate_rows_routing_parameters_request_3_grpc():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.MutateRowsRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_check_and_mutate_row_routing_parameters_request_1_grpc():
@@ -7216,9 +7506,11 @@ def test_check_and_mutate_row_routing_parameters_request_1_grpc():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_check_and_mutate_row_routing_parameters_request_2_grpc():
@@ -7242,9 +7534,11 @@ def test_check_and_mutate_row_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_check_and_mutate_row_routing_parameters_request_3_grpc():
@@ -7260,7 +7554,7 @@ def test_check_and_mutate_row_routing_parameters_request_3_grpc():
call.return_value = bigtable.CheckAndMutateRowResponse()
client.check_and_mutate_row(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -7269,19 +7563,21 @@ def test_check_and_mutate_row_routing_parameters_request_3_grpc():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.CheckAndMutateRowRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_ping_and_warm_routing_parameters_request_1_grpc():
@@ -7308,9 +7604,11 @@ def test_ping_and_warm_routing_parameters_request_1_grpc():
"name": "projects/sample1/instances/sample2",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_ping_and_warm_routing_parameters_request_2_grpc():
@@ -7332,9 +7630,11 @@ def test_ping_and_warm_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_read_modify_write_row_routing_parameters_request_1_grpc():
@@ -7365,9 +7665,11 @@ def test_read_modify_write_row_routing_parameters_request_1_grpc():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_read_modify_write_row_routing_parameters_request_2_grpc():
@@ -7393,9 +7695,11 @@ def test_read_modify_write_row_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_read_modify_write_row_routing_parameters_request_3_grpc():
@@ -7411,7 +7715,7 @@ def test_read_modify_write_row_routing_parameters_request_3_grpc():
call.return_value = bigtable.ReadModifyWriteRowResponse()
client.read_modify_write_row(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -7420,19 +7724,21 @@ def test_read_modify_write_row_routing_parameters_request_3_grpc():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.ReadModifyWriteRowRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_prepare_query_routing_parameters_request_1_grpc():
@@ -7461,9 +7767,11 @@ def test_prepare_query_routing_parameters_request_1_grpc():
"name": "projects/sample1/instances/sample2",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_prepare_query_routing_parameters_request_2_grpc():
@@ -7485,9 +7793,11 @@ def test_prepare_query_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_execute_query_routing_parameters_request_1_grpc():
@@ -7511,13 +7821,16 @@ def test_execute_query_routing_parameters_request_1_grpc():
)
assert args[0] == request_msg
+
expected_headers = {
"name": "projects/sample1/instances/sample2",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_execute_query_routing_parameters_request_2_grpc():
@@ -7539,9 +7852,11 @@ def test_execute_query_routing_parameters_request_2_grpc():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_transport_kind_grpc_asyncio():
@@ -7882,9 +8197,11 @@ async def test_read_rows_routing_parameters_request_1_grpc_asyncio():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -7911,9 +8228,11 @@ async def test_read_rows_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -7932,7 +8251,7 @@ async def test_read_rows_routing_parameters_request_3_grpc_asyncio():
)
await client.read_rows(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -7941,19 +8260,61 @@ async def test_read_rows_routing_parameters_request_3_grpc_asyncio():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.ReadRowsRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+@pytest.mark.asyncio
+async def test_read_rows_routing_parameters_request_4_grpc_asyncio():
+ client = BigtableAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[bigtable.ReadRowsResponse()]
+ )
+ await client.read_rows(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
+ }
+ )
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.ReadRowsRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+
+ assert args[0] == request_msg
+
+ expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -7987,9 +8348,11 @@ async def test_sample_row_keys_routing_parameters_request_1_grpc_asyncio():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8016,9 +8379,11 @@ async def test_sample_row_keys_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8037,7 +8402,7 @@ async def test_sample_row_keys_routing_parameters_request_3_grpc_asyncio():
)
await client.sample_row_keys(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -8046,19 +8411,61 @@ async def test_sample_row_keys_routing_parameters_request_3_grpc_asyncio():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.SampleRowKeysRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+@pytest.mark.asyncio
+async def test_sample_row_keys_routing_parameters_request_4_grpc_asyncio():
+ client = BigtableAsyncClient(
+ credentials=async_anonymous_credentials(),
+ transport="grpc_asyncio",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+ call.return_value.read = mock.AsyncMock(
+ side_effect=[bigtable.SampleRowKeysResponse()]
+ )
+ await client.sample_row_keys(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
+ }
+ )
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.SampleRowKeysRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+
+ assert args[0] == request_msg
+
+ expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8091,9 +8498,11 @@ async def test_mutate_row_routing_parameters_request_1_grpc_asyncio():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8119,9 +8528,11 @@ async def test_mutate_row_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8139,7 +8550,7 @@ async def test_mutate_row_routing_parameters_request_3_grpc_asyncio():
)
await client.mutate_row(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -8148,19 +8559,21 @@ async def test_mutate_row_routing_parameters_request_3_grpc_asyncio():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.MutateRowRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8194,9 +8607,11 @@ async def test_mutate_rows_routing_parameters_request_1_grpc_asyncio():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8223,9 +8638,11 @@ async def test_mutate_rows_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8244,7 +8661,7 @@ async def test_mutate_rows_routing_parameters_request_3_grpc_asyncio():
)
await client.mutate_rows(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -8253,19 +8670,21 @@ async def test_mutate_rows_routing_parameters_request_3_grpc_asyncio():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.MutateRowsRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8302,9 +8721,11 @@ async def test_check_and_mutate_row_routing_parameters_request_1_grpc_asyncio():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8334,9 +8755,11 @@ async def test_check_and_mutate_row_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8358,7 +8781,7 @@ async def test_check_and_mutate_row_routing_parameters_request_3_grpc_asyncio():
)
await client.check_and_mutate_row(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -8367,19 +8790,21 @@ async def test_check_and_mutate_row_routing_parameters_request_3_grpc_asyncio():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.CheckAndMutateRowRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8412,9 +8837,11 @@ async def test_ping_and_warm_routing_parameters_request_1_grpc_asyncio():
"name": "projects/sample1/instances/sample2",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8440,9 +8867,11 @@ async def test_ping_and_warm_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8477,9 +8906,11 @@ async def test_read_modify_write_row_routing_parameters_request_1_grpc_asyncio()
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8509,9 +8940,11 @@ async def test_read_modify_write_row_routing_parameters_request_2_grpc_asyncio()
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8531,7 +8964,7 @@ async def test_read_modify_write_row_routing_parameters_request_3_grpc_asyncio()
)
await client.read_modify_write_row(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -8540,19 +8973,21 @@ async def test_read_modify_write_row_routing_parameters_request_3_grpc_asyncio()
_, args, kw = call.mock_calls[0]
request_msg = bigtable.ReadModifyWriteRowRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8587,9 +9022,11 @@ async def test_prepare_query_routing_parameters_request_1_grpc_asyncio():
"name": "projects/sample1/instances/sample2",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8617,9 +9054,11 @@ async def test_prepare_query_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8653,9 +9092,11 @@ async def test_execute_query_routing_parameters_request_1_grpc_asyncio():
"name": "projects/sample1/instances/sample2",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
@pytest.mark.asyncio
@@ -8682,9 +9123,11 @@ async def test_execute_query_routing_parameters_request_2_grpc_asyncio():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_transport_kind_rest():
@@ -10335,9 +10778,11 @@ def test_read_rows_routing_parameters_request_1_rest():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_read_rows_routing_parameters_request_2_rest():
@@ -10358,9 +10803,11 @@ def test_read_rows_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_read_rows_routing_parameters_request_3_rest():
@@ -10373,7 +10820,7 @@ def test_read_rows_routing_parameters_request_3_rest():
with mock.patch.object(type(client.transport.read_rows), "__call__") as call:
client.read_rows(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -10382,19 +10829,55 @@ def test_read_rows_routing_parameters_request_3_rest():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.ReadRowsRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
+ }
+ )
+
+ assert args[0] == request_msg
+
+ expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
+ "app_profile_id": "",
+ }
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+def test_read_rows_routing_parameters_request_4_rest():
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call:
+ client.read_rows(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
}
)
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.ReadRowsRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+
assert args[0] == request_msg
expected_headers = {
+ "name": "projects/sample1/instances/sample2",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_sample_row_keys_routing_parameters_request_1_rest():
@@ -10422,9 +10905,11 @@ def test_sample_row_keys_routing_parameters_request_1_rest():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_sample_row_keys_routing_parameters_request_2_rest():
@@ -10445,9 +10930,11 @@ def test_sample_row_keys_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_sample_row_keys_routing_parameters_request_3_rest():
@@ -10460,7 +10947,7 @@ def test_sample_row_keys_routing_parameters_request_3_rest():
with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call:
client.sample_row_keys(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -10469,19 +10956,55 @@ def test_sample_row_keys_routing_parameters_request_3_rest():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.SampleRowKeysRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+def test_sample_row_keys_routing_parameters_request_4_rest():
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the actual call, and fake the request.
+ with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call:
+ client.sample_row_keys(
+ request={
+ "materialized_view_name": "projects/sample1/instances/sample2/sample3"
+ }
+ )
+
+ # Establish that the underlying stub method was called.
+ call.assert_called()
+ _, args, kw = call.mock_calls[0]
+ request_msg = bigtable.SampleRowKeysRequest(
+ **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"}
+ )
+
+ assert args[0] == request_msg
+
+ expected_headers = {
+ "name": "projects/sample1/instances/sample2",
+ "app_profile_id": "",
+ }
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+ )
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_row_routing_parameters_request_1_rest():
@@ -10509,9 +11032,11 @@ def test_mutate_row_routing_parameters_request_1_rest():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_row_routing_parameters_request_2_rest():
@@ -10532,9 +11057,11 @@ def test_mutate_row_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_row_routing_parameters_request_3_rest():
@@ -10547,7 +11074,7 @@ def test_mutate_row_routing_parameters_request_3_rest():
with mock.patch.object(type(client.transport.mutate_row), "__call__") as call:
client.mutate_row(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -10556,19 +11083,21 @@ def test_mutate_row_routing_parameters_request_3_rest():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.MutateRowRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_rows_routing_parameters_request_1_rest():
@@ -10596,9 +11125,11 @@ def test_mutate_rows_routing_parameters_request_1_rest():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_rows_routing_parameters_request_2_rest():
@@ -10619,9 +11150,11 @@ def test_mutate_rows_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_mutate_rows_routing_parameters_request_3_rest():
@@ -10634,7 +11167,7 @@ def test_mutate_rows_routing_parameters_request_3_rest():
with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call:
client.mutate_rows(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -10643,19 +11176,21 @@ def test_mutate_rows_routing_parameters_request_3_rest():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.MutateRowsRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_check_and_mutate_row_routing_parameters_request_1_rest():
@@ -10685,9 +11220,11 @@ def test_check_and_mutate_row_routing_parameters_request_1_rest():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_check_and_mutate_row_routing_parameters_request_2_rest():
@@ -10710,9 +11247,11 @@ def test_check_and_mutate_row_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_check_and_mutate_row_routing_parameters_request_3_rest():
@@ -10727,7 +11266,7 @@ def test_check_and_mutate_row_routing_parameters_request_3_rest():
) as call:
client.check_and_mutate_row(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -10736,19 +11275,21 @@ def test_check_and_mutate_row_routing_parameters_request_3_rest():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.CheckAndMutateRowRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_ping_and_warm_routing_parameters_request_1_rest():
@@ -10774,9 +11315,11 @@ def test_ping_and_warm_routing_parameters_request_1_rest():
"name": "projects/sample1/instances/sample2",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_ping_and_warm_routing_parameters_request_2_rest():
@@ -10797,9 +11340,11 @@ def test_ping_and_warm_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_read_modify_write_row_routing_parameters_request_1_rest():
@@ -10829,9 +11374,11 @@ def test_read_modify_write_row_routing_parameters_request_1_rest():
"table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_read_modify_write_row_routing_parameters_request_2_rest():
@@ -10856,9 +11403,11 @@ def test_read_modify_write_row_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_read_modify_write_row_routing_parameters_request_3_rest():
@@ -10873,7 +11422,7 @@ def test_read_modify_write_row_routing_parameters_request_3_rest():
) as call:
client.read_modify_write_row(
request={
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
@@ -10882,19 +11431,21 @@ def test_read_modify_write_row_routing_parameters_request_3_rest():
_, args, kw = call.mock_calls[0]
request_msg = bigtable.ReadModifyWriteRowRequest(
**{
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
+ "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4"
}
)
assert args[0] == request_msg
expected_headers = {
+ "table_name": "projects/sample1/instances/sample2/tables/sample3",
"app_profile_id": "",
- "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_prepare_query_routing_parameters_request_1_rest():
@@ -10922,9 +11473,11 @@ def test_prepare_query_routing_parameters_request_1_rest():
"name": "projects/sample1/instances/sample2",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_prepare_query_routing_parameters_request_2_rest():
@@ -10945,9 +11498,11 @@ def test_prepare_query_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_execute_query_routing_parameters_request_1_rest():
@@ -10975,9 +11530,11 @@ def test_execute_query_routing_parameters_request_1_rest():
"name": "projects/sample1/instances/sample2",
"app_profile_id": "",
}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_execute_query_routing_parameters_request_2_rest():
@@ -10998,9 +11555,11 @@ def test_execute_query_routing_parameters_request_2_rest():
assert args[0] == request_msg
expected_headers = {"app_profile_id": "sample1"}
- assert (
- gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"]
+ # assert the expected headers are present, in any order
+ routing_string = next(
+ iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
)
+ assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
def test_transport_grpc_default():
@@ -11393,6 +11952,7 @@ def test_bigtable_grpc_asyncio_transport_channel():
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize(
"transport_class",
[transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport],
diff --git a/tests/unit/v2_client/test_backup.py b/tests/unit/v2_client/test_backup.py
index 9882ca339..a5d205af6 100644
--- a/tests/unit/v2_client/test_backup.py
+++ b/tests/unit/v2_client/test_backup.py
@@ -19,7 +19,6 @@
import pytest
from ._testing import _make_credentials
-from google.cloud._helpers import UTC
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
@@ -38,13 +37,13 @@
def _make_timestamp():
- return datetime.datetime.utcnow().replace(tzinfo=UTC)
+ return datetime.datetime.now(datetime.timezone.utc)
def _make_table_admin_client():
- from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
+ from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient
- return mock.create_autospec(BigtableTableAdminClient, instance=True)
+ return mock.create_autospec(BaseBigtableTableAdminClient, instance=True)
def _make_backup(*args, **kwargs):
@@ -735,7 +734,7 @@ def test_backup_restore_w_grpc_error():
client = _Client()
api = client.table_admin_client = _make_table_admin_client()
- api.restore_table.side_effect = Unknown("testing")
+ api._restore_table.side_effect = Unknown("testing")
timestamp = _make_timestamp()
backup = _make_backup(
@@ -749,7 +748,7 @@ def test_backup_restore_w_grpc_error():
with pytest.raises(GoogleAPICallError):
backup.restore(TABLE_ID)
- api.restore_table.assert_called_once_with(
+ api._restore_table.assert_called_once_with(
request={"parent": INSTANCE_NAME, "table_id": TABLE_ID, "backup": BACKUP_NAME}
)
@@ -772,7 +771,7 @@ def _restore_helper(instance_id=None, instance_name=None):
op_future = object()
client = _Client()
api = client.table_admin_client = _make_table_admin_client()
- api.restore_table.return_value = op_future
+ api._restore_table.return_value = op_future
timestamp = _make_timestamp()
backup = _make_backup(
@@ -787,14 +786,14 @@ def _restore_helper(instance_id=None, instance_name=None):
assert backup._cluster == CLUSTER_ID
assert future is op_future
- api.restore_table.assert_called_once_with(
+ api._restore_table.assert_called_once_with(
request={
"parent": instance_name or INSTANCE_NAME,
"table_id": TABLE_ID,
"backup": BACKUP_NAME,
}
)
- api.restore_table.reset_mock()
+ api._restore_table.reset_mock()
def test_backup_restore_default():
@@ -808,7 +807,7 @@ def test_backup_restore_to_another_instance():
def test_backup_get_iam_policy():
from google.cloud.bigtable.client import Client
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
)
from google.iam.v1 import policy_pb2
from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
@@ -825,7 +824,7 @@ def test_backup_get_iam_policy():
bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
- table_api = mock.create_autospec(BigtableTableAdminClient)
+ table_api = mock.create_autospec(BaseBigtableTableAdminClient)
client._table_admin_client = table_api
table_api.get_iam_policy.return_value = iam_policy
@@ -844,7 +843,7 @@ def test_backup_get_iam_policy():
def test_backup_set_iam_policy():
from google.cloud.bigtable.client import Client
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
)
from google.iam.v1 import policy_pb2
from google.cloud.bigtable.policy import Policy
@@ -862,7 +861,7 @@ def test_backup_set_iam_policy():
bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
- table_api = mock.create_autospec(BigtableTableAdminClient)
+ table_api = mock.create_autospec(BaseBigtableTableAdminClient)
client._table_admin_client = table_api
table_api.set_iam_policy.return_value = iam_policy_pb
@@ -889,7 +888,7 @@ def test_backup_set_iam_policy():
def test_backup_test_iam_permissions():
from google.cloud.bigtable.client import Client
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
)
from google.iam.v1 import iam_policy_pb2
@@ -903,7 +902,7 @@ def test_backup_test_iam_permissions():
response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)
- table_api = mock.create_autospec(BigtableTableAdminClient)
+ table_api = mock.create_autospec(BaseBigtableTableAdminClient)
table_api.test_iam_permissions.return_value = response
client._table_admin_client = table_api
diff --git a/tests/unit/v2_client/test_client.py b/tests/unit/v2_client/test_client.py
index 4338f8553..a4fc0f9cb 100644
--- a/tests/unit/v2_client/test_client.py
+++ b/tests/unit/v2_client/test_client.py
@@ -449,18 +449,18 @@ def test_client_table_admin_client_not_initialized_no_admin_flag():
def test_client_table_admin_client_not_initialized_w_admin_flag():
- from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
+ from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient
credentials = _make_credentials()
client = _make_client(project=PROJECT, credentials=credentials, admin=True)
table_admin_client = client.table_admin_client
- assert isinstance(table_admin_client, BigtableTableAdminClient)
+ assert isinstance(table_admin_client, BaseBigtableTableAdminClient)
assert client._table_admin_client is table_admin_client
def test_client_table_admin_client_not_initialized_w_client_info():
- from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
+ from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient
credentials = _make_credentials()
client_info = mock.Mock()
@@ -472,7 +472,7 @@ def test_client_table_admin_client_not_initialized_w_client_info():
)
table_admin_client = client.table_admin_client
- assert isinstance(table_admin_client, BigtableTableAdminClient)
+ assert isinstance(table_admin_client, BaseBigtableTableAdminClient)
assert client._client_info is client_info
assert client._table_admin_client is table_admin_client
@@ -488,7 +488,7 @@ def test_client_table_admin_client_not_initialized_w_client_options():
)
client._create_gapic_client_channel = mock.Mock()
- patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableTableAdminClient")
+ patch = mock.patch("google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient")
with patch as mocked:
table_admin_client = client.table_admin_client
diff --git a/tests/unit/v2_client/test_cluster.py b/tests/unit/v2_client/test_cluster.py
index 65ed47437..a21104549 100644
--- a/tests/unit/v2_client/test_cluster.py
+++ b/tests/unit/v2_client/test_cluster.py
@@ -420,7 +420,7 @@ def test_cluster_create():
from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2
from google.cloud.bigtable.enums import StorageType
- NOW = datetime.datetime.utcnow()
+ NOW = datetime.datetime.now(datetime.timezone.utc)
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
client = _make_client(project=PROJECT, credentials=credentials, admin=True)
@@ -475,7 +475,7 @@ def test_cluster_create_w_cmek():
from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2
from google.cloud.bigtable.enums import StorageType
- NOW = datetime.datetime.utcnow()
+ NOW = datetime.datetime.now(datetime.timezone.utc)
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
client = _make_client(project=PROJECT, credentials=credentials, admin=True)
@@ -535,7 +535,7 @@ def test_cluster_create_w_autoscaling():
from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2
from google.cloud.bigtable.enums import StorageType
- NOW = datetime.datetime.utcnow()
+ NOW = datetime.datetime.now(datetime.timezone.utc)
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
client = _make_client(project=PROJECT, credentials=credentials, admin=True)
@@ -602,7 +602,7 @@ def test_cluster_update():
)
from google.cloud.bigtable.enums import StorageType
- NOW = datetime.datetime.utcnow()
+ NOW = datetime.datetime.now(datetime.timezone.utc)
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
@@ -669,7 +669,7 @@ def test_cluster_update_w_autoscaling():
)
from google.cloud.bigtable.enums import StorageType
- NOW = datetime.datetime.utcnow()
+ NOW = datetime.datetime.now(datetime.timezone.utc)
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
@@ -728,7 +728,7 @@ def test_cluster_update_w_partial_autoscaling_config():
)
from google.cloud.bigtable.enums import StorageType
- NOW = datetime.datetime.utcnow()
+ NOW = datetime.datetime.now(datetime.timezone.utc)
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
@@ -812,7 +812,7 @@ def test_cluster_update_w_both_manual_and_autoscaling():
)
from google.cloud.bigtable.enums import StorageType
- NOW = datetime.datetime.utcnow()
+ NOW = datetime.datetime.now(datetime.timezone.utc)
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
@@ -873,7 +873,7 @@ def test_cluster_disable_autoscaling():
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.enums import StorageType
- NOW = datetime.datetime.utcnow()
+ NOW = datetime.datetime.now(datetime.timezone.utc)
NOW_PB = _datetime_to_pb_timestamp(NOW)
credentials = _make_credentials()
client = _make_client(project=PROJECT, credentials=credentials, admin=True)
diff --git a/tests/unit/v2_client/test_column_family.py b/tests/unit/v2_client/test_column_family.py
index e4f74e264..2480e11cb 100644
--- a/tests/unit/v2_client/test_column_family.py
+++ b/tests/unit/v2_client/test_column_family.py
@@ -338,7 +338,7 @@ def _create_test_helper(gc_rule=None):
)
from ._testing import _FakeStub
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
)
project_id = "project-id"
@@ -357,7 +357,7 @@ def _create_test_helper(gc_rule=None):
+ table_id
)
- api = mock.create_autospec(BigtableTableAdminClient)
+ api = mock.create_autospec(BaseBigtableTableAdminClient)
credentials = _make_credentials()
client = _make_client(project=project_id, credentials=credentials, admin=True)
@@ -409,7 +409,7 @@ def _update_test_helper(gc_rule=None):
bigtable_table_admin as table_admin_v2_pb2,
)
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
)
project_id = "project-id"
@@ -428,7 +428,7 @@ def _update_test_helper(gc_rule=None):
+ table_id
)
- api = mock.create_autospec(BigtableTableAdminClient)
+ api = mock.create_autospec(BaseBigtableTableAdminClient)
credentials = _make_credentials()
client = _make_client(project=project_id, credentials=credentials, admin=True)
table = _Table(table_name, client=client)
@@ -480,7 +480,7 @@ def test_column_family_delete():
)
from ._testing import _FakeStub
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
)
project_id = "project-id"
@@ -499,7 +499,7 @@ def test_column_family_delete():
+ table_id
)
- api = mock.create_autospec(BigtableTableAdminClient)
+ api = mock.create_autospec(BaseBigtableTableAdminClient)
credentials = _make_credentials()
client = _make_client(project=project_id, credentials=credentials, admin=True)
table = _Table(table_name, client=client)
diff --git a/tests/unit/v2_client/test_instance.py b/tests/unit/v2_client/test_instance.py
index de6844a16..c5ef9c9b8 100644
--- a/tests/unit/v2_client/test_instance.py
+++ b/tests/unit/v2_client/test_instance.py
@@ -277,7 +277,7 @@ def _instance_api_response_for_create():
)
from google.cloud.bigtable_admin_v2.types import instance
- NOW = datetime.datetime.utcnow()
+ NOW = datetime.datetime.now(datetime.timezone.utc)
NOW_PB = _datetime_to_pb_timestamp(NOW)
metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB)
type_url = "type.googleapis.com/{}".format(
@@ -503,7 +503,7 @@ def _instance_api_response_for_update():
)
from google.cloud.bigtable_admin_v2.types import instance
- NOW = datetime.datetime.utcnow()
+ NOW = datetime.datetime.now(datetime.timezone.utc)
NOW_PB = _datetime_to_pb_timestamp(NOW)
metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB)
type_url = "type.googleapis.com/{}".format(
@@ -806,7 +806,7 @@ def _list_tables_helper(table_name=None):
bigtable_table_admin as table_messages_v1_pb2,
)
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
+ BaseBigtableTableAdminClient,
)
credentials = _make_credentials()
@@ -816,7 +816,7 @@ def _list_tables_helper(table_name=None):
instance_api = client._instance_admin_client = _make_instance_admin_api()
instance_api.instance_path.return_value = "projects/project/instances/instance-id"
table_api = client._table_admin_client = mock.create_autospec(
- BigtableTableAdminClient
+ BaseBigtableTableAdminClient
)
if table_name is None:
table_name = TABLE_NAME
diff --git a/tests/unit/v2_client/test_table.py b/tests/unit/v2_client/test_table.py
index 032363bd7..6b31a5e23 100644
--- a/tests/unit/v2_client/test_table.py
+++ b/tests/unit/v2_client/test_table.py
@@ -349,7 +349,7 @@ def _make_table_api():
client as bigtable_table_admin,
)
- return mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
+ return mock.create_autospec(bigtable_table_admin.BaseBigtableTableAdminClient)
def _create_table_helper(split_keys=[], column_families={}):
@@ -1378,13 +1378,12 @@ def test_table_backup_factory_defaults():
def test_table_backup_factory_non_defaults():
import datetime
- from google.cloud._helpers import UTC
from google.cloud.bigtable.backup import Backup
from google.cloud.bigtable.instance import Instance
instance = Instance(INSTANCE_ID, None)
table = _make_table(TABLE_ID, instance)
- timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC)
+ timestamp = datetime.datetime.now(datetime.timezone.utc)
backup = table.backup(
BACKUP_ID,
cluster_id=CLUSTER_ID,
@@ -1482,7 +1481,7 @@ def _table_restore_helper(backup_name=None):
table = _make_table(TABLE_ID, instance)
table_api = client._table_admin_client = _make_table_api()
- table_api.restore_table.return_value = op_future
+ table_api._restore_table.return_value = op_future
if backup_name:
future = table.restore(TABLE_ID, backup_name=BACKUP_NAME)
@@ -1496,7 +1495,7 @@ def _table_restore_helper(backup_name=None):
"table_id": TABLE_ID,
"backup": BACKUP_NAME,
}
- table_api.restore_table.assert_called_once_with(request=expected_request)
+ table_api._restore_table.assert_called_once_with(request=expected_request)
def test_table_restore_table_w_backup_id():