diff --git a/.coveragerc b/.coveragerc
index 1ba5bb57d..9b0751055 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -19,6 +19,7 @@
branch = True
omit =
google/cloud/__init__.py
+ google/__init__.py
[report]
fail_under = 100
diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
index 2567653c0..8cb43804d 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/.github/.OwlBot.lock.yaml
@@ -1,3 +1,16 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
docker:
image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
- digest: sha256:87eee22d276554e4e52863ec9b1cb6a7245815dfae20439712bf644348215a5a
+ digest: sha256:ed1f9983d5a935a89fe8085e8bb97d94e41015252c5b6c9771257cf8624367e6
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index dc38a1e1d..2f1fee904 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -3,9 +3,10 @@
#
# For syntax help see:
# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax
+# Note: This file is autogenerated. To make changes to the codeowner team, please update .repo-metadata.json.
+# @googleapis/yoshi-python @googleapis/api-bigtable are the default owners for changes in this repo
+* @googleapis/yoshi-python @googleapis/api-bigtable
-# The api-bigtable team is the default owner for anything not
-# explicitly taken by someone else.
-* @googleapis/api-bigtable @googleapis/yoshi-python
-/samples/ @googleapis/api-bigtable @googleapis/python-samples-owners
+# @googleapis/python-samples-reviewers @googleapis/api-bigtable are the default owners for samples changes
+/samples/ @googleapis/python-samples-reviewers @googleapis/api-bigtable
diff --git a/.github/release-please.yml b/.github/release-please.yml
index 4507ad059..466597e5b 100644
--- a/.github/release-please.yml
+++ b/.github/release-please.yml
@@ -1 +1,2 @@
releaseType: python
+handleGHRelease: true
diff --git a/.github/release-trigger.yml b/.github/release-trigger.yml
new file mode 100644
index 000000000..d4ca94189
--- /dev/null
+++ b/.github/release-trigger.yml
@@ -0,0 +1 @@
+enabled: true
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 000000000..f7b8344c4
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,38 @@
+on:
+ pull_request:
+ branches:
+ - main
+name: docs
+jobs:
+ docs:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: "3.10"
+ - name: Install nox
+ run: |
+ python -m pip install --upgrade setuptools pip wheel
+ python -m pip install nox
+ - name: Run docs
+ run: |
+ nox -s docs
+ docfx:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: "3.10"
+ - name: Install nox
+ run: |
+ python -m pip install --upgrade setuptools pip wheel
+ python -m pip install nox
+ - name: Run docfx
+ run: |
+ nox -s docfx
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 000000000..1e8b05c3d
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,25 @@
+on:
+ pull_request:
+ branches:
+ - main
+name: lint
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: "3.10"
+ - name: Install nox
+ run: |
+ python -m pip install --upgrade setuptools pip wheel
+ python -m pip install nox
+ - name: Run lint
+ run: |
+ nox -s lint
+ - name: Run lint_setup_py
+ run: |
+ nox -s lint_setup_py
diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml
new file mode 100644
index 000000000..5a0f0e090
--- /dev/null
+++ b/.github/workflows/mypy.yml
@@ -0,0 +1,22 @@
+on:
+ pull_request:
+ branches:
+ - main
+name: mypy
+jobs:
+ mypy:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: "3.8"
+ - name: Install nox
+ run: |
+ python -m pip install --upgrade setuptools pip wheel
+ python -m pip install nox
+ - name: Run mypy
+ run: |
+ nox -s mypy
diff --git a/.github/workflows/system_emulated.yml b/.github/workflows/system_emulated.yml
new file mode 100644
index 000000000..480ae98a4
--- /dev/null
+++ b/.github/workflows/system_emulated.yml
@@ -0,0 +1,29 @@
+name: "Run systests on emulator"
+on:
+ pull_request:
+ branches:
+ - main
+
+jobs:
+
+ run-systests:
+ runs-on: ubuntu-20.04
+
+ steps:
+
+ - name: Checkout
+ uses: actions/checkout@v2
+
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.8'
+
+ - name: Setup GCloud SDK
+ uses: google-github-actions/setup-gcloud@v0.5.0
+
+ - name: Install / run Nox
+ run: |
+ python -m pip install --upgrade setuptools pip
+ python -m pip install nox
+ nox -s system_emulated
diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
new file mode 100644
index 000000000..074ee2504
--- /dev/null
+++ b/.github/workflows/unittest.yml
@@ -0,0 +1,57 @@
+on:
+ pull_request:
+ branches:
+ - main
+name: unittest
+jobs:
+ unit:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python: ['3.6', '3.7', '3.8', '3.9', '3.10']
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python }}
+ - name: Install nox
+ run: |
+ python -m pip install --upgrade setuptools pip wheel
+ python -m pip install nox
+ - name: Run unit tests
+ env:
+ COVERAGE_FILE: .coverage-${{ matrix.python }}
+ run: |
+ nox -s unit-${{ matrix.python }}
+ - name: Upload coverage results
+ uses: actions/upload-artifact@v2
+ with:
+ name: coverage-artifacts
+ path: .coverage-${{ matrix.python }}
+
+ cover:
+ runs-on: ubuntu-latest
+ needs:
+ - unit
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: "3.10"
+ - name: Install coverage
+ run: |
+ python -m pip install --upgrade setuptools pip wheel
+ python -m pip install coverage
+ - name: Download coverage results
+ uses: actions/download-artifact@v2
+ with:
+ name: coverage-artifacts
+ path: .coverage-results/
+ - name: Report coverage results
+ run: |
+ coverage combine .coverage-results/.coverage*
+ coverage report --show-missing --fail-under=100
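For anyone wanting to reproduce the `cover` job locally, here is a rough sketch using the coverage.py API instead of the CLI; it assumes per-interpreter data files named `.coverage-<version>` were produced by the unit sessions above (via `COVERAGE_FILE`), and it is an approximation, not part of this PR:

```python
# Rough local equivalent of the CI "cover" job (a sketch): combine the
# per-interpreter data files, then enforce the fail-under threshold.
import glob

from coverage import Coverage

cov = Coverage()
cov.combine(glob.glob(".coverage-*"))  # merge .coverage-3.6 ... .coverage-3.10
cov.save()
percent = cov.report(show_missing=True)  # returns total coverage as a float
if percent < 100.0:
    raise SystemExit(f"coverage {percent:.1f}% is below fail-under=100")
```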
diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg
index 08aac45ad..9b8937c57 100644
--- a/.kokoro/docs/common.cfg
+++ b/.kokoro/docs/common.cfg
@@ -30,6 +30,7 @@ env_vars: {
env_vars: {
key: "V2_STAGING_BUCKET"
+ # Push google cloud library docs to the Cloud RAD bucket `docs-staging-v2`
value: "docs-staging-v2"
}
diff --git a/.kokoro/release.sh b/.kokoro/release.sh
index d3ffac5f6..f0cb9d5db 100755
--- a/.kokoro/release.sh
+++ b/.kokoro/release.sh
@@ -26,7 +26,7 @@ python3 -m pip install --upgrade twine wheel setuptools
export PYTHONUNBUFFERED=1
# Move into the package, build the distribution and upload.
-TWINE_PASSWORD=$(cat "${KOKORO_GFILE_DIR}/secret_manager/google-cloud-pypi-token")
+TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-1")
cd github/python-bigtable
python3 setup.py sdist bdist_wheel
twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/*
diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg
index d964a8f06..8477e4ca6 100644
--- a/.kokoro/release/common.cfg
+++ b/.kokoro/release/common.cfg
@@ -23,8 +23,18 @@ env_vars: {
value: "github/python-bigtable/.kokoro/release.sh"
}
+# Fetch PyPI password
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "google-cloud-pypi-token-keystore-1"
+ }
+ }
+}
+
# Tokens needed to report release status back to GitHub
env_vars: {
key: "SECRET_MANAGER_KEYS"
- value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem,google-cloud-pypi-token"
+ value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem"
}
diff --git a/.kokoro/samples/lint/common.cfg b/.kokoro/samples/lint/common.cfg
index b597cb22f..54b069fd0 100644
--- a/.kokoro/samples/lint/common.cfg
+++ b/.kokoro/samples/lint/common.cfg
@@ -31,4 +31,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-bigtable/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.10/common.cfg b/.kokoro/samples/python3.10/common.cfg
new file mode 100644
index 000000000..0dc18096b
--- /dev/null
+++ b/.kokoro/samples/python3.10/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.10"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-310"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigtable/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.10/continuous.cfg b/.kokoro/samples/python3.10/continuous.cfg
new file mode 100644
index 000000000..a1c8d9759
--- /dev/null
+++ b/.kokoro/samples/python3.10/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.10/periodic-head.cfg b/.kokoro/samples/python3.10/periodic-head.cfg
new file mode 100644
index 000000000..be25a34f9
--- /dev/null
+++ b/.kokoro/samples/python3.10/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigtable/.kokoro/test-samples-against-head.sh"
+}
diff --git a/.kokoro/samples/python3.10/periodic.cfg b/.kokoro/samples/python3.10/periodic.cfg
new file mode 100644
index 000000000..71cd1e597
--- /dev/null
+++ b/.kokoro/samples/python3.10/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
diff --git a/.kokoro/samples/python3.10/presubmit.cfg b/.kokoro/samples/python3.10/presubmit.cfg
new file mode 100644
index 000000000..a1c8d9759
--- /dev/null
+++ b/.kokoro/samples/python3.10/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg
index f71693fca..21e188507 100644
--- a/.kokoro/samples/python3.6/common.cfg
+++ b/.kokoro/samples/python3.6/common.cfg
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-bigtable/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.6/periodic.cfg b/.kokoro/samples/python3.6/periodic.cfg
index 50fec9649..71cd1e597 100644
--- a/.kokoro/samples/python3.6/periodic.cfg
+++ b/.kokoro/samples/python3.6/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg
index 5fa465fda..7db66bb86 100644
--- a/.kokoro/samples/python3.7/common.cfg
+++ b/.kokoro/samples/python3.7/common.cfg
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-bigtable/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.7/periodic.cfg b/.kokoro/samples/python3.7/periodic.cfg
index 50fec9649..71cd1e597 100644
--- a/.kokoro/samples/python3.7/periodic.cfg
+++ b/.kokoro/samples/python3.7/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg
index f3a6fa7ec..482008891 100644
--- a/.kokoro/samples/python3.8/common.cfg
+++ b/.kokoro/samples/python3.8/common.cfg
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-bigtable/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/periodic.cfg b/.kokoro/samples/python3.8/periodic.cfg
index 50fec9649..71cd1e597 100644
--- a/.kokoro/samples/python3.8/periodic.cfg
+++ b/.kokoro/samples/python3.8/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/samples/python3.9/common.cfg b/.kokoro/samples/python3.9/common.cfg
index 5bc5fa834..4e3b12fcc 100644
--- a/.kokoro/samples/python3.9/common.cfg
+++ b/.kokoro/samples/python3.9/common.cfg
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-bigtable/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.9/periodic.cfg b/.kokoro/samples/python3.9/periodic.cfg
index 50fec9649..71cd1e597 100644
--- a/.kokoro/samples/python3.9/periodic.cfg
+++ b/.kokoro/samples/python3.9/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh
index 2dda9815b..ba3a707b0 100755
--- a/.kokoro/test-samples-against-head.sh
+++ b/.kokoro/test-samples-against-head.sh
@@ -23,6 +23,4 @@ set -eo pipefail
# Enables `**` to include files nested inside sub-folders
shopt -s globstar
-cd github/python-bigtable
-
exec .kokoro/test-samples-impl.sh
diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh
index 4666d34f9..11c042d34 100755
--- a/.kokoro/test-samples.sh
+++ b/.kokoro/test-samples.sh
@@ -24,8 +24,6 @@ set -eo pipefail
# Enables `**` to include files nested inside sub-folders
shopt -s globstar
-cd github/python-bigtable
-
# Run periodic samples tests at latest release
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
# preserving the test runner implementation.
diff --git a/.repo-metadata.json b/.repo-metadata.json
index 883fd57c0..3c65ac669 100644
--- a/.repo-metadata.json
+++ b/.repo-metadata.json
@@ -1,77 +1,80 @@
{
- "name": "bigtable",
- "name_pretty": "Cloud Bigtable",
- "product_documentation": "https://cloud.google.com/bigtable",
- "client_documentation": "https://googleapis.dev/python/bigtable/latest",
- "issue_tracker": "https://issuetracker.google.com/savedsearches/559777",
- "release_level": "ga",
- "language": "python",
- "library_type": "GAPIC_COMBO",
- "repo": "googleapis/python-bigtable",
- "distribution_name": "google-cloud-bigtable",
- "api_id": "bigtable.googleapis.com",
- "requires_billing": true,
- "samples": [
- {
- "name": "Hello World in Cloud Bigtable",
- "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello",
- "file": "main.py",
- "runnable": true,
- "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
- "override_path": "hello"
- },
- {
- "name": "Hello World using HappyBase",
- "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase",
- "file": "main.py",
- "runnable": true,
- "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
- "override_path": "hello_happybase"
- },
- {
- "name": "cbt Command Demonstration",
- "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt",
- "file": "instanceadmin.py",
- "runnable": true,
- "custom_content": "usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
- "override_path": "instanceadmin"
- },
- {
- "name": "Metric Scaler",
- "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.",
- "file": "metricscaler.py",
- "runnable": true,
- "custom_content": "usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] [--low_cpu_threshold LOW_CPU_THRESHOLD] [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] bigtable_instance bigtable_cluster
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
[--low_cpu_threshold LOW_CPU_THRESHOLD]
[--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
bigtable_instance bigtable_cluster
Scales Cloud Bigtable clusters based on CPU usage.
positional arguments:
bigtable_instance ID of the Cloud Bigtable instance to connect to.
bigtable_cluster ID of the Cloud Bigtable cluster to connect to.
optional arguments:
-h, --help show this help message and exit
--high_cpu_threshold HIGH_CPU_THRESHOLD
If Cloud Bigtable CPU usage is above this threshold,
scale up
--low_cpu_threshold LOW_CPU_THRESHOLD
If Cloud Bigtable CPU usage is below this threshold,
scale down
--short_sleep SHORT_SLEEP
How long to sleep in seconds between checking metrics
after no scale operation
--long_sleep LONG_SLEEP
How long to sleep in seconds between checking metrics
after a scaling operation
",
- "override_path": "metricscaler"
- },
- {
- "name": "Quickstart",
- "description": "Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
- "file": "main.py",
- "runnable": true,
- "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Existing table used in the quickstart. (default: my-table)
",
- "override_path": "quickstart"
- },
- {
- "name": "Quickstart using HappyBase",
- "description": "Demonstrates of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
- "file": "main.py",
- "runnable": true,
- "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id
usage: main.py [-h] [--table TABLE] project_id instance_id
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Existing table used in the quickstart. (default: my-table)usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
- "override_path": "tableadmin"
- }
- ]
-}
\ No newline at end of file
+ "name": "bigtable",
+ "name_pretty": "Cloud Bigtable",
+ "product_documentation": "https://cloud.google.com/bigtable",
+ "client_documentation": "https://cloud.google.com/python/docs/reference/bigtable/latest",
+ "issue_tracker": "https://issuetracker.google.com/savedsearches/559777",
+ "release_level": "stable",
+ "language": "python",
+ "library_type": "GAPIC_COMBO",
+ "repo": "googleapis/python-bigtable",
+ "distribution_name": "google-cloud-bigtable",
+ "api_id": "bigtable.googleapis.com",
+ "requires_billing": true,
+ "samples": [
+ {
+ "name": "Hello World in Cloud Bigtable",
+ "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello",
+ "file": "main.py",
+ "runnable": true,
+ "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
+ "override_path": "hello"
+ },
+ {
+ "name": "Hello World using HappyBase",
+ "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase",
+ "file": "main.py",
+ "runnable": true,
+ "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
+ "override_path": "hello_happybase"
+ },
+ {
+ "name": "cbt Command Demonstration",
+ "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt",
+ "file": "instanceadmin.py",
+ "runnable": true,
+ "custom_content": "usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
+ "override_path": "instanceadmin"
+ },
+ {
+ "name": "Metric Scaler",
+ "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.",
+ "file": "metricscaler.py",
+ "runnable": true,
+ "custom_content": "usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] [--low_cpu_threshold LOW_CPU_THRESHOLD] [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] bigtable_instance bigtable_cluster
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
[--low_cpu_threshold LOW_CPU_THRESHOLD]
[--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
bigtable_instance bigtable_cluster
Scales Cloud Bigtable clusters based on CPU usage.
positional arguments:
bigtable_instance ID of the Cloud Bigtable instance to connect to.
bigtable_cluster ID of the Cloud Bigtable cluster to connect to.
optional arguments:
-h, --help show this help message and exit
--high_cpu_threshold HIGH_CPU_THRESHOLD
If Cloud Bigtable CPU usage is above this threshold,
scale up
--low_cpu_threshold LOW_CPU_THRESHOLD
If Cloud Bigtable CPU usage is below this threshold,
scale down
--short_sleep SHORT_SLEEP
How long to sleep in seconds between checking metrics
after no scale operation
--long_sleep LONG_SLEEP
How long to sleep in seconds between checking metrics
after a scaling operation
",
+ "override_path": "metricscaler"
+ },
+ {
+ "name": "Quickstart",
+ "description": "Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
+ "file": "main.py",
+ "runnable": true,
+ "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Existing table used in the quickstart. (default: my-table)
",
+ "override_path": "quickstart"
+ },
+ {
+ "name": "Quickstart using HappyBase",
+ "description": "Demonstrates of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.",
+ "file": "main.py",
+ "runnable": true,
+ "custom_content": "usage: main.py [-h] [--table TABLE] project_id instance_id
usage: main.py [-h] [--table TABLE] project_id instance_id
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Existing table used in the quickstart. (default: my-table)usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials
positional arguments:
project_id Your Cloud Platform project ID.
instance_id ID of the Cloud Bigtable instance to connect to.
optional arguments:
-h, --help show this help message and exit
--table TABLE Table to create and destroy. (default: Hello-Bigtable)
",
+ "override_path": "tableadmin"
+ }
+ ],
+ "default_version": "v2",
+ "codeowner_team": "@googleapis/api-bigtable",
+ "api_shortname": "bigtable"
+}
diff --git a/.trampolinerc b/.trampolinerc
index 383b6ec89..0eee72ab6 100644
--- a/.trampolinerc
+++ b/.trampolinerc
@@ -16,15 +16,26 @@
# Add required env vars here.
required_envvars+=(
- "STAGING_BUCKET"
- "V2_STAGING_BUCKET"
)
# Add env vars which are passed down into the container here.
pass_down_envvars+=(
+ "NOX_SESSION"
+ ###############
+ # Docs builds
+ ###############
"STAGING_BUCKET"
"V2_STAGING_BUCKET"
- "NOX_SESSION"
+ ##################
+ # Samples builds
+ ##################
+ "INSTALL_LIBRARY_FROM_SOURCE"
+ "RUN_TESTS_SESSION"
+ "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ # Target directories.
+ "RUN_TESTS_DIRS"
+ # The nox session to run.
+ "RUN_TESTS_SESSION"
)
# Prevent unintentional override on the default image.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0bee749f3..01e2650d5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,32 @@
[1]: https://pypi.org/project/google-cloud-bigtable/#history
+## [2.5.0](https://github.com/googleapis/python-bigtable/compare/v2.4.0...v2.5.0) (2022-02-07)
+
+
+### Features
+
+* add 'Instance.create_time' field ([#449](https://github.com/googleapis/python-bigtable/issues/449)) ([b9ecfa9](https://github.com/googleapis/python-bigtable/commit/b9ecfa97281ae21dcf233e60c70cacc701f12c32))
+* add api key support ([#497](https://github.com/googleapis/python-bigtable/issues/497)) ([ee3a6c4](https://github.com/googleapis/python-bigtable/commit/ee3a6c4c5f810fab08671db3407195864ecc1972))
+* add Autoscaling API ([#475](https://github.com/googleapis/python-bigtable/issues/475)) ([97b3cdd](https://github.com/googleapis/python-bigtable/commit/97b3cddb908098e255e7a1209cdb985087b95a26))
+* add context manager support in client ([#440](https://github.com/googleapis/python-bigtable/issues/440)) ([a3d2cf1](https://github.com/googleapis/python-bigtable/commit/a3d2cf18b49cddc91e5e6448c46d6b936d86954d))
+* add support for Python 3.10 ([#437](https://github.com/googleapis/python-bigtable/issues/437)) ([3cf0814](https://github.com/googleapis/python-bigtable/commit/3cf08149411f3f4df41e9b5a9894dbfb101bd86f))
+
+
+### Bug Fixes
+
+* **deps:** drop packaging dependency ([a535f99](https://github.com/googleapis/python-bigtable/commit/a535f99e9f0bb16488a5d372a0a6efc3c4b69186))
+* **deps:** require google-api-core >= 1.28.0 ([a535f99](https://github.com/googleapis/python-bigtable/commit/a535f99e9f0bb16488a5d372a0a6efc3c4b69186))
+* improper types in pagers generation ([f9c7699](https://github.com/googleapis/python-bigtable/commit/f9c7699eb6d4071314abbb0477ba47370059e041))
+* improve type hints, mypy checks ([#448](https://github.com/googleapis/python-bigtable/issues/448)) ([a99bf88](https://github.com/googleapis/python-bigtable/commit/a99bf88417d6aec03923447c70c2752f6bb5c459))
+* resolve DuplicateCredentialArgs error when using credentials_file ([d6bff70](https://github.com/googleapis/python-bigtable/commit/d6bff70654b41e31d2ac83d307bdc6bbd111201e))
+
+
+### Documentation
+
+* clarify comments in ReadRowsRequest and RowFilter ([#494](https://github.com/googleapis/python-bigtable/issues/494)) ([1efd9b5](https://github.com/googleapis/python-bigtable/commit/1efd9b598802f766a3c4c8c78ec7b0ca208d3325))
+* list oneofs in docstring ([a535f99](https://github.com/googleapis/python-bigtable/commit/a535f99e9f0bb16488a5d372a0a6efc3c4b69186))
+
## [2.4.0](https://www.github.com/googleapis/python-bigtable/compare/v2.3.3...v2.4.0) (2021-09-24)
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 78b6684a3..a15cf6527 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -22,7 +22,7 @@ In order to add a feature:
documentation.
- The feature must work fully on the following CPython versions:
- 3.6, 3.7, 3.8 and 3.9 on both UNIX and Windows.
+ 3.6, 3.7, 3.8, 3.9 and 3.10 on both UNIX and Windows.
- The feature must not add unnecessary dependencies (where
"unnecessary" is of course subjective, but new dependencies should
@@ -72,7 +72,7 @@ We use `nox <https://nox.thea.codes/>`__ to instrument our tests.
- To run a single unit test::
- $ nox -s unit-3.9 -- -k
+ $ nox -s unit-3.10 -- -k
.. note::
@@ -225,11 +225,13 @@ We support:
- `Python 3.7`_
- `Python 3.8`_
- `Python 3.9`_
+- `Python 3.10`_
.. _Python 3.6: https://docs.python.org/3.6/
.. _Python 3.7: https://docs.python.org/3.7/
.. _Python 3.8: https://docs.python.org/3.8/
.. _Python 3.9: https://docs.python.org/3.9/
+.. _Python 3.10: https://docs.python.org/3.10/
Supported versions can be found in our ``noxfile.py`` `config`_.
diff --git a/docs/data-api.rst b/docs/data-api.rst
index 9d9205e6b..01a49178f 100644
--- a/docs/data-api.rst
+++ b/docs/data-api.rst
@@ -337,8 +337,8 @@ Just as with reading, the stream can be canceled:
keys_iterator.cancel()
-.. _ReadRows: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L54-L61
-.. _SampleRowKeys: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L67-L73
-.. _MutateRow: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L77-L84
-.. _CheckAndMutateRow: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L99-L106
-.. _ReadModifyWriteRow: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L113-L121
+.. _ReadRows: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L42-L72
+.. _SampleRowKeys: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L184-L199
+.. _MutateRow: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L230-L256
+.. _CheckAndMutateRow: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L339-L386
+.. _ReadModifyWriteRow: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L401-L430
diff --git a/google/__init__.py b/google/__init__.py
new file mode 100644
index 000000000..a5ba80656
--- /dev/null
+++ b/google/__init__.py
@@ -0,0 +1,6 @@
+try:
+ import pkg_resources
+
+ pkg_resources.declare_namespace(__name__)
+except ImportError:
+ pass
diff --git a/google/cloud/__init__.py b/google/cloud/__init__.py
index 2f4b4738a..a5ba80656 100644
--- a/google/cloud/__init__.py
+++ b/google/cloud/__init__.py
@@ -3,6 +3,4 @@
pkg_resources.declare_namespace(__name__)
except ImportError:
- import pkgutil
-
- __path__ = pkgutil.extend_path(__path__, __name__)
+ pass
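Context for the two `__init__.py` changes above: both files now declare pkg_resources-style namespace packages, so several distributions can install into the same `google.*` tree. A quick way to observe the effect locally (a sketch; the exact output depends on which distributions are installed):

```python
# With namespace packages, 'google' and 'google.cloud' have no single
# on-disk home; their __path__ is a search path spanning distributions.
import google
import google.cloud

print(google.__path__)        # may list several site-packages entries
print(google.cloud.__path__)
```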
diff --git a/google/cloud/bigtable/__init__.py b/google/cloud/bigtable/__init__.py
index f2c5a24bd..a54096624 100644
--- a/google/cloud/bigtable/__init__.py
+++ b/google/cloud/bigtable/__init__.py
@@ -15,15 +15,16 @@
"""Google Cloud Bigtable API package."""
+from typing import Optional
import pkg_resources
+from google.cloud.bigtable.client import Client
+
+__version__: Optional[str]
try:
__version__ = pkg_resources.get_distribution("google-cloud-bigtable").version
except pkg_resources.DistributionNotFound:
__version__ = None
-from google.cloud.bigtable.client import Client
-
-
__all__ = ["__version__", "Client"]
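The reshuffled `__init__.py` keeps the `pkg_resources` lookup; for comparison only, here is a minimal sketch of the same version lookup built on the stdlib `importlib.metadata` (Python 3.8+). This is not what the PR does, just the modern equivalent:

```python
# Equivalent version lookup without pkg_resources (assumes Python 3.8+).
from typing import Optional

from importlib.metadata import PackageNotFoundError, version

__version__: Optional[str]
try:
    __version__ = version("google-cloud-bigtable")
except PackageNotFoundError:
    # Package is not installed (e.g. running from a source checkout).
    __version__ = None
```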
diff --git a/google/cloud/bigtable/backup.py b/google/cloud/bigtable/backup.py
index 0991e85f5..c2b5ec9ee 100644
--- a/google/cloud/bigtable/backup.py
+++ b/google/cloud/bigtable/backup.py
@@ -16,12 +16,12 @@
import re
-from google.cloud._helpers import _datetime_to_pb_timestamp
+from google.cloud._helpers import _datetime_to_pb_timestamp # type: ignore
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
from google.cloud.bigtable_admin_v2.types import table
from google.cloud.bigtable.encryption_info import EncryptionInfo
from google.cloud.bigtable.policy import Policy
-from google.cloud.exceptions import NotFound
+from google.cloud.exceptions import NotFound # type: ignore
from google.protobuf import field_mask_pb2
_BACKUP_NAME_RE = re.compile(
diff --git a/google/cloud/bigtable/client.py b/google/cloud/bigtable/client.py
index 7249c0b35..c50c20b0f 100644
--- a/google/cloud/bigtable/client.py
+++ b/google/cloud/bigtable/client.py
@@ -29,10 +29,11 @@
"""
import os
import warnings
-import grpc
+import grpc # type: ignore
-from google.api_core.gapic_v1 import client_info
-import google.auth
+from google.api_core.gapic_v1 import client_info as client_info_lib
+import google.auth # type: ignore
+from google.auth.credentials import AnonymousCredentials # type: ignore
from google.cloud import bigtable_v2
from google.cloud import bigtable_admin_v2
@@ -44,21 +45,20 @@
BigtableTableAdminGrpcTransport,
)
-from google.cloud.bigtable import __version__
+from google.cloud import bigtable
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.cluster import Cluster
-from google.cloud.client import ClientWithProject
+from google.cloud.client import ClientWithProject # type: ignore
from google.cloud.bigtable_admin_v2.types import instance
from google.cloud.bigtable.cluster import _CLUSTER_NAME_RE
-from google.cloud.environment_vars import BIGTABLE_EMULATOR
+from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore
INSTANCE_TYPE_PRODUCTION = instance.Instance.Type.PRODUCTION
INSTANCE_TYPE_DEVELOPMENT = instance.Instance.Type.DEVELOPMENT
INSTANCE_TYPE_UNSPECIFIED = instance.Instance.Type.TYPE_UNSPECIFIED
-_CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__)
SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin"
ADMIN_SCOPE = "https://www.googleapis.com/auth/bigtable.admin"
"""Scope for interacting with the Cluster Admin and Table Admin APIs."""
@@ -67,6 +67,7 @@
READ_ONLY_SCOPE = "https://www.googleapis.com/auth/bigtable.data.readonly"
"""Scope for reading table data."""
+_DEFAULT_BIGTABLE_EMULATOR_CLIENT = "google-cloud-bigtable-emulator"
_GRPC_CHANNEL_OPTIONS = (
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
@@ -153,11 +154,15 @@ def __init__(
credentials=None,
read_only=False,
admin=False,
- client_info=_CLIENT_INFO,
+ client_info=None,
client_options=None,
admin_client_options=None,
channel=None,
):
+ if client_info is None:
+ client_info = client_info_lib.ClientInfo(
+ client_library_version=bigtable.__version__,
+ )
if read_only and admin:
raise ValueError(
"A read-only client cannot also perform" "administrative actions."
@@ -170,6 +175,12 @@ def __init__(
self._client_info = client_info
self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
+ if self._emulator_host is not None:
+ if credentials is None:
+ credentials = AnonymousCredentials()
+ if project is None:
+ project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+
if channel is not None:
warnings.warn(
"'channel' is deprecated and no longer used.",
diff --git a/google/cloud/bigtable/instance.py b/google/cloud/bigtable/instance.py
index e6e2ac027..9c22aaa79 100644
--- a/google/cloud/bigtable/instance.py
+++ b/google/cloud/bigtable/instance.py
@@ -24,7 +24,7 @@
from google.cloud.bigtable_admin_v2.types import instance
-from google.iam.v1 import options_pb2
+from google.iam.v1 import options_pb2 # type: ignore
from google.api_core.exceptions import NotFound
diff --git a/google/cloud/bigtable/policy.py b/google/cloud/bigtable/policy.py
index f5558b6f0..8396642fb 100644
--- a/google/cloud/bigtable/policy.py
+++ b/google/cloud/bigtable/policy.py
@@ -15,8 +15,8 @@
import base64
from google.api_core.iam import Policy as BasePolicy
-from google.cloud._helpers import _to_bytes
-from google.iam.v1 import policy_pb2
+from google.cloud._helpers import _to_bytes # type: ignore
+from google.iam.v1 import policy_pb2 # type: ignore
"""IAM roles supported by Bigtable Instance resource"""
BIGTABLE_ADMIN_ROLE = "roles/bigtable.admin"
diff --git a/google/cloud/bigtable/py.typed b/google/cloud/bigtable/py.typed
new file mode 100644
index 000000000..7bd4705d4
--- /dev/null
+++ b/google/cloud/bigtable/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-bigtable package uses inline types.
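With the PEP 561 marker in place, type checkers consume the package's inline annotations instead of treating it as untyped. A small sketch of consumer code that now type-checks cleanly under mypy (the helper function is illustrative):

```python
# Hypothetical consumer code: because py.typed ships with the package,
# mypy resolves Client's inline annotations rather than reporting
# "module is installed, but missing library stubs or py.typed marker".
from google.cloud.bigtable import Client

def make_admin_client(project: str) -> Client:
    return Client(project=project, admin=True)
```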
diff --git a/google/cloud/bigtable/row.py b/google/cloud/bigtable/row.py
index 3fdc230f7..9127a1aae 100644
--- a/google/cloud/bigtable/row.py
+++ b/google/cloud/bigtable/row.py
@@ -17,9 +17,9 @@
import struct
-from google.cloud._helpers import _datetime_from_microseconds
-from google.cloud._helpers import _microseconds_from_datetime
-from google.cloud._helpers import _to_bytes
+from google.cloud._helpers import _datetime_from_microseconds # type: ignore
+from google.cloud._helpers import _microseconds_from_datetime # type: ignore
+from google.cloud._helpers import _to_bytes # type: ignore
from google.cloud.bigtable_v2.types import data as data_v2_pb2
diff --git a/google/cloud/bigtable/row_data.py b/google/cloud/bigtable/row_data.py
index 18d82153b..6ab1188a8 100644
--- a/google/cloud/bigtable/row_data.py
+++ b/google/cloud/bigtable/row_data.py
@@ -17,12 +17,12 @@
import copy
-import grpc
+import grpc # type: ignore
from google.api_core import exceptions
from google.api_core import retry
-from google.cloud._helpers import _datetime_from_microseconds
-from google.cloud._helpers import _to_bytes
+from google.cloud._helpers import _datetime_from_microseconds # type: ignore
+from google.cloud._helpers import _to_bytes # type: ignore
from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2
from google.cloud.bigtable_v2.types import data as data_v2_pb2
diff --git a/google/cloud/bigtable/row_filters.py b/google/cloud/bigtable/row_filters.py
index b495fb646..53192acc8 100644
--- a/google/cloud/bigtable/row_filters.py
+++ b/google/cloud/bigtable/row_filters.py
@@ -17,8 +17,8 @@
import struct
-from google.cloud._helpers import _microseconds_from_datetime
-from google.cloud._helpers import _to_bytes
+from google.cloud._helpers import _microseconds_from_datetime # type: ignore
+from google.cloud._helpers import _to_bytes # type: ignore
from google.cloud.bigtable_v2.types import data as data_v2_pb2
_PACK_I64 = struct.Struct(">q").pack
diff --git a/google/cloud/bigtable/row_set.py b/google/cloud/bigtable/row_set.py
index 32a9bd1e3..82a540b5a 100644
--- a/google/cloud/bigtable/row_set.py
+++ b/google/cloud/bigtable/row_set.py
@@ -15,7 +15,7 @@
"""User-friendly container for Google Cloud Bigtable RowSet """
-from google.cloud._helpers import _to_bytes
+from google.cloud._helpers import _to_bytes # type: ignore
class RowSet(object):
diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py
index 8dc4f5e42..fddd04809 100644
--- a/google/cloud/bigtable/table.py
+++ b/google/cloud/bigtable/table.py
@@ -14,6 +14,7 @@
"""User-friendly container for Google Cloud Bigtable Table."""
+from typing import Set
import warnings
from google.api_core import timeout
@@ -25,7 +26,7 @@
from google.api_core.gapic_v1.method import DEFAULT
from google.api_core.retry import if_exception_type
from google.api_core.retry import Retry
-from google.cloud._helpers import _to_bytes
+from google.cloud._helpers import _to_bytes # type: ignore
from google.cloud.bigtable.backup import Backup
from google.cloud.bigtable.column_family import _gc_rule_from_pb
from google.cloud.bigtable.column_family import ColumnFamily
@@ -57,6 +58,12 @@
RETRYABLE_MUTATION_ERRORS = (Aborted, DeadlineExceeded, ServiceUnavailable)
"""Errors which can be retried during row mutation."""
+RETRYABLE_CODES: Set[int] = set()
+
+for retryable in RETRYABLE_MUTATION_ERRORS:
+ if retryable.grpc_status_code is not None: # pragma: NO COVER
+ RETRYABLE_CODES.add(retryable.grpc_status_code.value[0])
+
class _BigtableRetryableError(Exception):
"""Retry-able error expected by the default retry strategy."""
@@ -1043,10 +1050,6 @@ class _RetryableMutateRowsWorker(object):
are retryable, any subsequent call on this callable will be a no-op.
"""
- RETRY_CODES = tuple(
- retryable.grpc_status_code.value[0] for retryable in RETRYABLE_MUTATION_ERRORS
- )
-
def __init__(self, client, table_name, rows, app_profile_id=None, timeout=None):
self.client = client
self.table_name = table_name
@@ -1083,7 +1086,7 @@ def __call__(self, retry=DEFAULT_RETRY):
@staticmethod
def _is_retryable(status):
- return status is None or status.code in _RetryableMutateRowsWorker.RETRY_CODES
+ return status is None or status.code in RETRYABLE_CODES
def _do_mutate_retryable_rows(self):
"""Mutate all the rows that are eligible for retry.
@@ -1128,7 +1131,7 @@ def _do_mutate_retryable_rows(self):
**kwargs
)
except RETRYABLE_MUTATION_ERRORS:
- # If an exception, considered retryable by `RETRY_CODES`, is
+ # If an exception, considered retryable by `RETRYABLE_MUTATION_ERRORS`, is
# returned from the initial call, consider
# it to be retryable. Wrap as a Bigtable Retryable Error.
raise _BigtableRetryableError
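The change above moves the retryable-status computation from a class attribute (which assumed every error class has a gRPC status code) to a guarded module-level set. A small illustration of what the new loop produces; the numeric values follow standard gRPC status-code numbering:

```python
# Illustration of the new RETRYABLE_CODES computation (a sketch).
from typing import Set

from google.api_core.exceptions import Aborted, DeadlineExceeded, ServiceUnavailable

RETRYABLE_MUTATION_ERRORS = (Aborted, DeadlineExceeded, ServiceUnavailable)

codes: Set[int] = set()
for exc in RETRYABLE_MUTATION_ERRORS:
    if exc.grpc_status_code is not None:  # guard added by this PR
        codes.add(exc.grpc_status_code.value[0])

print(sorted(codes))  # [4, 10, 14]: DEADLINE_EXCEEDED, ABORTED, UNAVAILABLE
```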
diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py
index db670f299..545000fbf 100644
--- a/google/cloud/bigtable_admin_v2/__init__.py
+++ b/google/cloud/bigtable_admin_v2/__init__.py
@@ -36,6 +36,8 @@
from .types.bigtable_instance_admin import ListClustersResponse
from .types.bigtable_instance_admin import ListInstancesRequest
from .types.bigtable_instance_admin import ListInstancesResponse
+from .types.bigtable_instance_admin import PartialUpdateClusterMetadata
+from .types.bigtable_instance_admin import PartialUpdateClusterRequest
from .types.bigtable_instance_admin import PartialUpdateInstanceRequest
from .types.bigtable_instance_admin import UpdateAppProfileMetadata
from .types.bigtable_instance_admin import UpdateAppProfileRequest
@@ -73,6 +75,8 @@
from .types.common import OperationProgress
from .types.common import StorageType
from .types.instance import AppProfile
+from .types.instance import AutoscalingLimits
+from .types.instance import AutoscalingTargets
from .types.instance import Cluster
from .types.instance import Instance
from .types.table import Backup
@@ -89,6 +93,8 @@
"BigtableInstanceAdminAsyncClient",
"BigtableTableAdminAsyncClient",
"AppProfile",
+ "AutoscalingLimits",
+ "AutoscalingTargets",
"Backup",
"BackupInfo",
"BigtableInstanceAdminClient",
@@ -140,6 +146,8 @@
"ModifyColumnFamiliesRequest",
"OperationProgress",
"OptimizeRestoredTableMetadata",
+ "PartialUpdateClusterMetadata",
+ "PartialUpdateClusterRequest",
"PartialUpdateInstanceRequest",
"RestoreInfo",
"RestoreSourceType",
diff --git a/google/cloud/bigtable_admin_v2/gapic_metadata.json b/google/cloud/bigtable_admin_v2/gapic_metadata.json
index f5e134543..c360e7712 100644
--- a/google/cloud/bigtable_admin_v2/gapic_metadata.json
+++ b/google/cloud/bigtable_admin_v2/gapic_metadata.json
@@ -75,6 +75,11 @@
"list_instances"
]
},
+ "PartialUpdateCluster": {
+ "methods": [
+ "partial_update_cluster"
+ ]
+ },
"PartialUpdateInstance": {
"methods": [
"partial_update_instance"
@@ -175,6 +180,11 @@
"list_instances"
]
},
+ "PartialUpdateCluster": {
+ "methods": [
+ "partial_update_cluster"
+ ]
+ },
"PartialUpdateInstance": {
"methods": [
"partial_update_instance"
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
index c118257de..9cc58c7eb 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
@@ -16,16 +16,21 @@
from collections import OrderedDict
import functools
import re
-from typing import Dict, Sequence, Tuple, Type, Union
+from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-import google.api_core.client_options as ClientOptions # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
+
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers
@@ -36,6 +41,7 @@
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport
from .client import BigtableInstanceAdminClient
@@ -125,6 +131,42 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[ClientOptions] = None
+ ):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ return BigtableInstanceAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
+
@property
def transport(self) -> BigtableInstanceAdminTransport:
"""Returns the transport used by the client instance.
@@ -188,22 +230,26 @@ def __init__(
async def create_instance(
self,
- request: bigtable_instance_admin.CreateInstanceRequest = None,
+ request: Union[bigtable_instance_admin.CreateInstanceRequest, dict] = None,
*,
parent: str = None,
instance_id: str = None,
instance: gba_instance.Instance = None,
- clusters: Sequence[
- bigtable_instance_admin.CreateInstanceRequest.ClustersEntry
- ] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ clusters: Dict[str, gba_instance.Cluster] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Create an instance within a project.
+ Note that exactly one of Cluster.serve_nodes and
+ Cluster.cluster_config.cluster_autoscaling_config can be set. If
+ serve_nodes is set to non-zero, then the cluster is manually
+ scaled. If cluster_config.cluster_autoscaling_config is
+ non-empty, then autoscaling is enabled.
+
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.CreateInstanceRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.CreateInstance.
parent (:class:`str`):
@@ -229,7 +275,7 @@ async def create_instance(
This corresponds to the ``instance`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- clusters (:class:`Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]`):
+ clusters (Dict[str, gba_instance.Cluster]):
Required. The clusters to be created within the
instance, mapped by desired cluster ID, e.g., just
``mycluster`` rather than
@@ -258,7 +304,7 @@ async def create_instance(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, instance_id, instance, clusters])
if request is not None and has_flattened_params:
@@ -311,17 +357,17 @@ async def create_instance(
async def get_instance(
self,
- request: bigtable_instance_admin.GetInstanceRequest = None,
+ request: Union[bigtable_instance_admin.GetInstanceRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instance.Instance:
r"""Gets information about an instance.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.GetInstanceRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.GetInstance.
name (:class:`str`):
@@ -348,7 +394,7 @@ async def get_instance(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -396,17 +442,17 @@ async def get_instance(
async def list_instances(
self,
- request: bigtable_instance_admin.ListInstancesRequest = None,
+ request: Union[bigtable_instance_admin.ListInstancesRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable_instance_admin.ListInstancesResponse:
r"""Lists information about instances in a project.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.ListInstancesRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.ListInstances.
parent (:class:`str`):
@@ -430,7 +476,7 @@ async def list_instances(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -478,9 +524,9 @@ async def list_instances(
async def update_instance(
self,
- request: instance.Instance = None,
+ request: Union[instance.Instance, dict] = None,
*,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instance.Instance:
@@ -490,7 +536,7 @@ async def update_instance(
PartialUpdateInstance.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.Instance`):
+ request (Union[google.cloud.bigtable_admin_v2.types.Instance, dict]):
The request object. A collection of Bigtable
[Tables][google.bigtable.admin.v2.Table] and the
resources that serve them. All tables in an instance are
@@ -547,11 +593,13 @@ async def update_instance(
async def partial_update_instance(
self,
- request: bigtable_instance_admin.PartialUpdateInstanceRequest = None,
+ request: Union[
+ bigtable_instance_admin.PartialUpdateInstanceRequest, dict
+ ] = None,
*,
instance: gba_instance.Instance = None,
update_mask: field_mask_pb2.FieldMask = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
@@ -560,7 +608,7 @@ async def partial_update_instance(
preferred way to update an Instance.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.PartialUpdateInstance.
instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`):
@@ -596,7 +644,7 @@ async def partial_update_instance(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([instance, update_mask])
if request is not None and has_flattened_params:
@@ -656,17 +704,17 @@ async def partial_update_instance(
async def delete_instance(
self,
- request: bigtable_instance_admin.DeleteInstanceRequest = None,
+ request: Union[bigtable_instance_admin.DeleteInstanceRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Delete an instance from a project.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.DeleteInstance.
name (:class:`str`):
@@ -684,7 +732,7 @@ async def delete_instance(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -721,19 +769,25 @@ async def delete_instance(
async def create_cluster(
self,
- request: bigtable_instance_admin.CreateClusterRequest = None,
+ request: Union[bigtable_instance_admin.CreateClusterRequest, dict] = None,
*,
parent: str = None,
cluster_id: str = None,
cluster: instance.Cluster = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a cluster within an instance.
+ Note that exactly one of Cluster.serve_nodes and
+ Cluster.cluster_config.cluster_autoscaling_config can be set. If
+ serve_nodes is set to non-zero, then the cluster is manually
+ scaled. If cluster_config.cluster_autoscaling_config is
+ non-empty, then autoscaling is enabled.
+
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.CreateClusterRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.CreateCluster.
parent (:class:`str`):
@@ -777,7 +831,7 @@ async def create_cluster(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, cluster_id, cluster])
if request is not None and has_flattened_params:
@@ -827,17 +881,17 @@ async def create_cluster(
async def get_cluster(
self,
- request: bigtable_instance_admin.GetClusterRequest = None,
+ request: Union[bigtable_instance_admin.GetClusterRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instance.Cluster:
r"""Gets information about a cluster.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.GetClusterRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.GetCluster.
name (:class:`str`):
@@ -863,7 +917,7 @@ async def get_cluster(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -911,17 +965,17 @@ async def get_cluster(
async def list_clusters(
self,
- request: bigtable_instance_admin.ListClustersRequest = None,
+ request: Union[bigtable_instance_admin.ListClustersRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable_instance_admin.ListClustersResponse:
r"""Lists information about clusters in an instance.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.ListClustersRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.ListClusters.
parent (:class:`str`):
@@ -947,7 +1001,7 @@ async def list_clusters(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -995,16 +1049,20 @@ async def list_clusters(
async def update_cluster(
self,
- request: instance.Cluster = None,
+ request: Union[instance.Cluster, dict] = None,
*,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates a cluster within an instance.
+ Note that UpdateCluster does not support updating
+ cluster_config.cluster_autoscaling_config. In order to update
+ it, you must use PartialUpdateCluster.
+
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.Cluster`):
+ request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]):
The request object. A resizable group of nodes in a
particular cloud location, capable of serving all
[Tables][google.bigtable.admin.v2.Table] in the parent
@@ -1066,19 +1124,139 @@ async def update_cluster(
# Done; return the response.
return response
+ async def partial_update_cluster(
+ self,
+ request: Union[
+ bigtable_instance_admin.PartialUpdateClusterRequest, dict
+ ] = None,
+ *,
+ cluster: instance.Cluster = None,
+ update_mask: field_mask_pb2.FieldMask = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Partially updates a cluster within a project. This method is the
+ preferred way to update a Cluster.
+
+ To enable and update autoscaling, set
+ cluster_config.cluster_autoscaling_config. When autoscaling is
+ enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning
+ that updates to it are ignored. Note that an update cannot
+ simultaneously set serve_nodes to non-zero and
+ cluster_config.cluster_autoscaling_config to non-empty, and also
+ specify both in the update_mask.
+
+ To disable autoscaling, clear
+ cluster_config.cluster_autoscaling_config, and explicitly set a
+ serve_nodes count via the update_mask.
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]):
+ The request object. Request message for
+ BigtableInstanceAdmin.PartialUpdateCluster.
+ cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`):
+ Required. The Cluster which contains the partial updates
+ to be applied, subject to the update_mask.
+
+ This corresponds to the ``cluster`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. The subset of Cluster
+ fields which should be replaced.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster`, a resizable group of nodes in a particular cloud location, capable
+ of serving all
+ [Tables][google.bigtable.admin.v2.Table] in the
+ parent [Instance][google.bigtable.admin.v2.Instance].
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([cluster, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = bigtable_instance_admin.PartialUpdateClusterRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if cluster is not None:
+ request.cluster = cluster
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.partial_update_cluster,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=60.0,
+ multiplier=2,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=60.0,
+ ),
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("cluster.name", request.cluster.name),)
+ ),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ instance.Cluster,
+ metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
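For illustration, a minimal sketch (not part of the diff) of enabling autoscaling through the new async method. The project, instance, and cluster IDs are placeholders; the nested field names follow the autoscaling messages this change introduces, and the sketch assumes `BigtableInstanceAdminAsyncClient` is exported from `google.cloud.bigtable_admin_v2` as in other GAPIC packages.

import asyncio
from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminAsyncClient
from google.cloud.bigtable_admin_v2.types import Cluster
from google.protobuf import field_mask_pb2

async def enable_autoscaling():
    client = BigtableInstanceAdminAsyncClient()
    # Proto-plus accepts plain dicts for nested messages; leaving serve_nodes
    # unset and populating cluster_autoscaling_config switches the cluster to
    # autoscaling, per the docstring above.
    cluster = Cluster(
        name="projects/my-project/instances/my-instance/clusters/my-cluster",
        cluster_config={
            "cluster_autoscaling_config": {
                "autoscaling_limits": {"min_serve_nodes": 1, "max_serve_nodes": 5},
                "autoscaling_targets": {"cpu_utilization_percent": 50},
            }
        },
    )
    operation = await client.partial_update_cluster(
        cluster=cluster,
        update_mask=field_mask_pb2.FieldMask(
            paths=["cluster_config.cluster_autoscaling_config"]
        ),
    )
    return await operation.result()  # resolves to the updated Cluster

asyncio.run(enable_autoscaling())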
async def delete_cluster(
self,
- request: bigtable_instance_admin.DeleteClusterRequest = None,
+ request: Union[bigtable_instance_admin.DeleteClusterRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a cluster from an instance.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.DeleteClusterRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.DeleteCluster.
name (:class:`str`):
@@ -1096,7 +1274,7 @@ async def delete_cluster(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1133,19 +1311,19 @@ async def delete_cluster(
async def create_app_profile(
self,
- request: bigtable_instance_admin.CreateAppProfileRequest = None,
+ request: Union[bigtable_instance_admin.CreateAppProfileRequest, dict] = None,
*,
parent: str = None,
app_profile_id: str = None,
app_profile: instance.AppProfile = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instance.AppProfile:
r"""Creates an app profile within an instance.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.CreateAppProfile.
parent (:class:`str`):
@@ -1186,7 +1364,7 @@ async def create_app_profile(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, app_profile_id, app_profile])
if request is not None and has_flattened_params:
@@ -1228,17 +1406,17 @@ async def create_app_profile(
async def get_app_profile(
self,
- request: bigtable_instance_admin.GetAppProfileRequest = None,
+ request: Union[bigtable_instance_admin.GetAppProfileRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instance.AppProfile:
r"""Gets information about an app profile.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.GetAppProfileRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.GetAppProfile.
name (:class:`str`):
@@ -1263,7 +1441,7 @@ async def get_app_profile(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1311,17 +1489,17 @@ async def get_app_profile(
async def list_app_profiles(
self,
- request: bigtable_instance_admin.ListAppProfilesRequest = None,
+ request: Union[bigtable_instance_admin.ListAppProfilesRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAppProfilesAsyncPager:
r"""Lists information about app profiles in an instance.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.ListAppProfiles.
parent (:class:`str`):
@@ -1351,7 +1529,7 @@ async def list_app_profiles(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -1405,18 +1583,18 @@ async def list_app_profiles(
async def update_app_profile(
self,
- request: bigtable_instance_admin.UpdateAppProfileRequest = None,
+ request: Union[bigtable_instance_admin.UpdateAppProfileRequest, dict] = None,
*,
app_profile: instance.AppProfile = None,
update_mask: field_mask_pb2.FieldMask = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates an app profile within an instance.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.UpdateAppProfile.
app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`):
@@ -1449,7 +1627,7 @@ async def update_app_profile(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([app_profile, update_mask])
if request is not None and has_flattened_params:
@@ -1509,17 +1687,17 @@ async def update_app_profile(
async def delete_app_profile(
self,
- request: bigtable_instance_admin.DeleteAppProfileRequest = None,
+ request: Union[bigtable_instance_admin.DeleteAppProfileRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an app profile from an instance.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.DeleteAppProfile.
name (:class:`str`):
@@ -1537,7 +1715,7 @@ async def delete_app_profile(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1574,10 +1752,10 @@ async def delete_app_profile(
async def get_iam_policy(
self,
- request: iam_policy_pb2.GetIamPolicyRequest = None,
+ request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None,
*,
resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
@@ -1586,7 +1764,7 @@ async def get_iam_policy(
but does not have a policy set.
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`):
+ request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
The request object. Request message for `GetIamPolicy`
method.
resource (:class:`str`):
@@ -1664,7 +1842,7 @@ async def get_iam_policy(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
@@ -1712,10 +1890,10 @@ async def get_iam_policy(
async def set_iam_policy(
self,
- request: iam_policy_pb2.SetIamPolicyRequest = None,
+ request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None,
*,
resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
@@ -1723,7 +1901,7 @@ async def set_iam_policy(
resource. Replaces any existing policy.
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`):
+ request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
The request object. Request message for `SetIamPolicy`
method.
resource (:class:`str`):
@@ -1801,7 +1979,7 @@ async def set_iam_policy(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
@@ -1839,11 +2017,11 @@ async def set_iam_policy(
async def test_iam_permissions(
self,
- request: iam_policy_pb2.TestIamPermissionsRequest = None,
+ request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None,
*,
resource: str = None,
permissions: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
@@ -1851,7 +2029,7 @@ async def test_iam_permissions(
specified instance resource.
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`):
+ request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
The request object. Request message for
`TestIamPermissions` method.
resource (:class:`str`):
@@ -1883,7 +2061,7 @@ async def test_iam_permissions(
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource, permissions])
if request is not None and has_flattened_params:
@@ -1931,6 +2109,12 @@ async def test_iam_permissions(
# Done; return the response.
return response
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
+
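With the hooks above, the async client can be used in an `async with` block. A short hedged sketch (the project ID is a placeholder, and the import path assumes the usual package-level export):

import asyncio
from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminAsyncClient

async def main():
    # __aenter__ returns the client; __aexit__ awaits transport.close().
    async with BigtableInstanceAdminAsyncClient() as client:
        response = await client.list_instances(parent="projects/my-project")
        for inst in response.instances:
            print(inst.display_name)

asyncio.run(main())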
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
index 9c9a8a152..697d0fd9b 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
@@ -14,22 +14,26 @@
# limitations under the License.
#
from collections import OrderedDict
-from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
+
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers
@@ -40,6 +44,7 @@
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import BigtableInstanceAdminGrpcTransport
from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport
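The `OptionalRetry` alias introduced above lets callers either pass an explicit `retries.Retry` or leave the per-method default in place. A hedged sketch of overriding it; the backoff numbers are purely illustrative:

from google.api_core import retry as retries

custom_retry = retries.Retry(
    initial=0.5,     # seconds before the first retry
    maximum=30.0,    # upper bound on the backoff interval
    multiplier=2.0,  # exponential growth factor
    deadline=120.0,  # total time budget before giving up
)
# client.get_instance(name="projects/p/instances/i", retry=custom_retry)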
@@ -294,6 +299,73 @@ def parse_common_location_path(path: str) -> Dict[str, str]:
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
+ ):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise, use the default mTLS endpoint when a client cert source
+ exists and the default API endpoint when it does not.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_client_cert not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError(
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Figure out the client cert source to use.
+ client_cert_source = None
+ if use_client_cert == "true":
+ if client_options.client_cert_source:
+ client_cert_source = client_options.client_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ elif use_mtls_endpoint == "always" or (
+ use_mtls_endpoint == "auto" and client_cert_source
+ ):
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = cls.DEFAULT_ENDPOINT
+
+ return api_endpoint, client_cert_source
+
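A hedged sketch of exercising this classmethod directly; which endpoint comes back depends on the two environment variables described in the docstring, read at call time:

import os
from google.api_core.client_options import ClientOptions
from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient

os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"  # force the plain endpoint
endpoint, cert_source = BigtableInstanceAdminClient.get_mtls_endpoint_and_cert_source(
    ClientOptions()
)
assert endpoint == BigtableInstanceAdminClient.DEFAULT_ENDPOINT
assert cert_source is None  # only set when ...USE_CLIENT_CERTIFICATE is "true"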
def __init__(
self,
*,
@@ -344,50 +416,22 @@ def __init__(
if client_options is None:
client_options = client_options_lib.ClientOptions()
- # Create SSL credentials for mutual TLS if needed.
- use_client_cert = bool(
- util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
+ client_options
)
- client_cert_source_func = None
- is_mtls = False
- if use_client_cert:
- if client_options.client_cert_source:
- is_mtls = True
- client_cert_source_func = client_options.client_cert_source
- else:
- is_mtls = mtls.has_default_client_cert_source()
- if is_mtls:
- client_cert_source_func = mtls.default_client_cert_source()
- else:
- client_cert_source_func = None
-
- # Figure out which api endpoint to use.
- if client_options.api_endpoint is not None:
- api_endpoint = client_options.api_endpoint
- else:
- use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_mtls_env == "never":
- api_endpoint = self.DEFAULT_ENDPOINT
- elif use_mtls_env == "always":
- api_endpoint = self.DEFAULT_MTLS_ENDPOINT
- elif use_mtls_env == "auto":
- if is_mtls:
- api_endpoint = self.DEFAULT_MTLS_ENDPOINT
- else:
- api_endpoint = self.DEFAULT_ENDPOINT
- else:
- raise MutualTLSChannelError(
- "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
- "values: never, auto, always"
- )
+ api_key_value = getattr(client_options, "api_key", None)
+ if api_key_value and credentials:
+ raise ValueError(
+ "client_options.api_key and credentials are mutually exclusive"
+ )
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, BigtableInstanceAdminTransport):
# transport is a BigtableInstanceAdminTransport instance.
- if credentials or client_options.credentials_file:
+ if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
@@ -399,6 +443,15 @@ def __init__(
)
self._transport = transport
else:
+ import google.auth._default # type: ignore
+
+ if api_key_value and hasattr(
+ google.auth._default, "get_api_key_credentials"
+ ):
+ credentials = google.auth._default.get_api_key_credentials(
+ api_key_value
+ )
+
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
@@ -408,10 +461,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
- always_use_jwt_access=(
- Transport == type(self).get_transport_class("grpc")
- or Transport == type(self).get_transport_class("grpc_asyncio")
- ),
+ always_use_jwt_access=True,
)
def create_instance(
@@ -421,15 +471,19 @@ def create_instance(
parent: str = None,
instance_id: str = None,
instance: gba_instance.Instance = None,
- clusters: Sequence[
- bigtable_instance_admin.CreateInstanceRequest.ClustersEntry
- ] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ clusters: Dict[str, gba_instance.Cluster] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Create an instance within a project.
+ Note that exactly one of Cluster.serve_nodes and
+ Cluster.cluster_config.cluster_autoscaling_config can be set. If
+ serve_nodes is set to non-zero, then the cluster is manually
+ scaled. If cluster_config.cluster_autoscaling_config is
+ non-empty, then autoscaling is enabled.
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]):
The request object. Request message for
@@ -457,7 +511,7 @@ def create_instance(
This corresponds to the ``instance`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- clusters (Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]):
+ clusters (Dict[str, gba_instance.Cluster]):
Required. The clusters to be created within the
instance, mapped by desired cluster ID, e.g., just
``mycluster`` rather than
@@ -486,7 +540,7 @@ def create_instance(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, instance_id, instance, clusters])
if request is not None and has_flattened_params:
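The hunk above changes the flattened `clusters` parameter to a plain mapping from cluster ID to Cluster. A hedged sketch of a create_instance call using it; all names are placeholders:

from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient
from google.cloud.bigtable_admin_v2.types import Cluster, Instance

client = BigtableInstanceAdminClient()
operation = client.create_instance(
    parent="projects/my-project",
    instance_id="my-instance",
    instance=Instance(display_name="My Instance"),
    clusters={
        # Keyed by the desired cluster ID, e.g. "mycluster", not the full name.
        "mycluster": Cluster(
            location="projects/my-project/locations/us-central1-b",
            serve_nodes=3,  # manual scaling, per the exactly-one-of note above
        ),
    },
)
new_instance = operation.result()  # blocks until the LRO completes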
@@ -541,7 +595,7 @@ def get_instance(
request: Union[bigtable_instance_admin.GetInstanceRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instance.Instance:
@@ -575,7 +629,7 @@ def get_instance(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -616,7 +670,7 @@ def list_instances(
request: Union[bigtable_instance_admin.ListInstancesRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable_instance_admin.ListInstancesResponse:
@@ -647,7 +701,7 @@ def list_instances(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -687,7 +741,7 @@ def update_instance(
self,
request: Union[instance.Instance, dict] = None,
*,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instance.Instance:
@@ -751,7 +805,7 @@ def partial_update_instance(
*,
instance: gba_instance.Instance = None,
update_mask: field_mask_pb2.FieldMask = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
@@ -796,7 +850,7 @@ def partial_update_instance(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([instance, update_mask])
if request is not None and has_flattened_params:
@@ -851,7 +905,7 @@ def delete_instance(
request: Union[bigtable_instance_admin.DeleteInstanceRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
@@ -876,7 +930,7 @@ def delete_instance(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -918,12 +972,18 @@ def create_cluster(
parent: str = None,
cluster_id: str = None,
cluster: instance.Cluster = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a cluster within an instance.
+ Note that exactly one of Cluster.serve_nodes and
+ Cluster.cluster_config.cluster_autoscaling_config can be set. If
+ serve_nodes is set to non-zero, then the cluster is manually
+ scaled. If cluster_config.cluster_autoscaling_config is
+ non-empty, then autoscaling is enabled.
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]):
The request object. Request message for
@@ -969,7 +1029,7 @@ def create_cluster(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, cluster_id, cluster])
if request is not None and has_flattened_params:
@@ -1022,7 +1082,7 @@ def get_cluster(
request: Union[bigtable_instance_admin.GetClusterRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instance.Cluster:
@@ -1055,7 +1115,7 @@ def get_cluster(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1096,7 +1156,7 @@ def list_clusters(
request: Union[bigtable_instance_admin.ListClustersRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable_instance_admin.ListClustersResponse:
@@ -1129,7 +1189,7 @@ def list_clusters(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -1169,12 +1229,16 @@ def update_cluster(
self,
request: Union[instance.Cluster, dict] = None,
*,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Updates a cluster within an instance.
+ Note that UpdateCluster does not support updating
+ cluster_config.cluster_autoscaling_config. In order to update
+ it, you must use PartialUpdateCluster.
+
Args:
request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]):
The request object. A resizable group of nodes in a
@@ -1229,12 +1293,122 @@ def update_cluster(
# Done; return the response.
return response
+ def partial_update_cluster(
+ self,
+ request: Union[
+ bigtable_instance_admin.PartialUpdateClusterRequest, dict
+ ] = None,
+ *,
+ cluster: instance.Cluster = None,
+ update_mask: field_mask_pb2.FieldMask = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation.Operation:
+ r"""Partially updates a cluster within a project. This method is the
+ preferred way to update a Cluster.
+
+ To enable and update autoscaling, set
+ cluster_config.cluster_autoscaling_config. When autoscaling is
+ enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning
+ that updates to it are ignored. Note that an update cannot
+ simultaneously set serve_nodes to non-zero and
+ cluster_config.cluster_autoscaling_config to non-empty, and also
+ specify both in the update_mask.
+
+ To disable autoscaling, clear
+ cluster_config.cluster_autoscaling_config, and explicitly set a
+ serve_nodes count via the update_mask.
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]):
+ The request object. Request message for
+ BigtableInstanceAdmin.PartialUpdateCluster.
+ cluster (google.cloud.bigtable_admin_v2.types.Cluster):
+ Required. The Cluster which contains the partial updates
+ to be applied, subject to the update_mask.
+
+ This corresponds to the ``cluster`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The subset of Cluster
+ fields which should be replaced.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster`, a resizable group of nodes in a particular cloud location, capable
+ of serving all
+ [Tables][google.bigtable.admin.v2.Table] in the
+ parent [Instance][google.bigtable.admin.v2.Instance].
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([cluster, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a bigtable_instance_admin.PartialUpdateClusterRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest):
+ request = bigtable_instance_admin.PartialUpdateClusterRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if cluster is not None:
+ request.cluster = cluster
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.partial_update_cluster]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("cluster.name", request.cluster.name),)
+ ),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ instance.Cluster,
+ metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
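For the disable path the docstring describes (clear the autoscaling config and pin an explicit node count), a hedged synchronous sketch with placeholder IDs:

from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient
from google.cloud.bigtable_admin_v2.types import Cluster
from google.protobuf import field_mask_pb2

client = BigtableInstanceAdminClient()
# Leaving cluster_autoscaling_config unset clears it when it is named in the
# update_mask; serve_nodes supplies the new fixed cluster size.
cluster = Cluster(
    name="projects/my-project/instances/my-instance/clusters/my-cluster",
    serve_nodes=3,
)
operation = client.partial_update_cluster(
    cluster=cluster,
    update_mask=field_mask_pb2.FieldMask(
        paths=["cluster_config.cluster_autoscaling_config", "serve_nodes"]
    ),
)
updated = operation.result()  # blocks until the LRO completes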
def delete_cluster(
self,
request: Union[bigtable_instance_admin.DeleteClusterRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
@@ -1259,7 +1433,7 @@ def delete_cluster(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1301,7 +1475,7 @@ def create_app_profile(
parent: str = None,
app_profile_id: str = None,
app_profile: instance.AppProfile = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instance.AppProfile:
@@ -1349,7 +1523,7 @@ def create_app_profile(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, app_profile_id, app_profile])
if request is not None and has_flattened_params:
@@ -1394,7 +1568,7 @@ def get_app_profile(
request: Union[bigtable_instance_admin.GetAppProfileRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instance.AppProfile:
@@ -1426,7 +1600,7 @@ def get_app_profile(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1467,7 +1641,7 @@ def list_app_profiles(
request: Union[bigtable_instance_admin.ListAppProfilesRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAppProfilesPager:
@@ -1504,7 +1678,7 @@ def list_app_profiles(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -1552,7 +1726,7 @@ def update_app_profile(
*,
app_profile: instance.AppProfile = None,
update_mask: field_mask_pb2.FieldMask = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
@@ -1592,7 +1766,7 @@ def update_app_profile(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([app_profile, update_mask])
if request is not None and has_flattened_params:
@@ -1645,7 +1819,7 @@ def delete_app_profile(
request: Union[bigtable_instance_admin.DeleteAppProfileRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
@@ -1670,7 +1844,7 @@ def delete_app_profile(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1710,7 +1884,7 @@ def get_iam_policy(
request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None,
*,
resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
@@ -1797,7 +1971,7 @@ def get_iam_policy(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
@@ -1837,7 +2011,7 @@ def set_iam_policy(
request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None,
*,
resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
@@ -1923,7 +2097,7 @@ def set_iam_policy(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
@@ -1964,7 +2138,7 @@ def test_iam_permissions(
*,
resource: str = None,
permissions: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
@@ -2004,7 +2178,7 @@ def test_iam_permissions(
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource, permissions])
if request is not None and has_flattened_params:
@@ -2041,6 +2215,19 @@ def test_iam_permissions(
# Done; return the response.
return response
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
+ """
+ self.transport.close()
+
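The synchronous counterpart of the async hooks; per the warning above, this is only safe when the transport is not shared. A short sketch (the `-` instance ID lists clusters across all instances in the project):

from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient

with BigtableInstanceAdminClient() as client:
    response = client.list_clusters(parent="projects/my-project/instances/-")
    for cluster in response.clusters:
        print(cluster.name)
# The transport is closed on exit; the client must not be reused afterwards.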
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py
index cf5def768..d220a1b26 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py
@@ -15,13 +15,13 @@
#
from typing import (
Any,
- AsyncIterable,
+ AsyncIterator,
Awaitable,
Callable,
- Iterable,
Sequence,
Tuple,
Optional,
+ Iterator,
)
from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin
@@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[bigtable_instance_admin.ListAppProfilesResponse]:
+ def pages(self) -> Iterator[bigtable_instance_admin.ListAppProfilesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
- def __iter__(self) -> Iterable[instance.AppProfile]:
+ def __iter__(self) -> Iterator[instance.AppProfile]:
for page in self.pages:
yield from page.app_profiles
@@ -141,14 +141,14 @@ def __getattr__(self, name: str) -> Any:
@property
async def pages(
self,
- ) -> AsyncIterable[bigtable_instance_admin.ListAppProfilesResponse]:
+ ) -> AsyncIterator[bigtable_instance_admin.ListAppProfilesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
- def __aiter__(self) -> AsyncIterable[instance.AppProfile]:
+ def __aiter__(self) -> AsyncIterator[instance.AppProfile]:
async def async_generator():
async for page in self.pages:
for response in page.app_profiles:
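The annotation change from AsyncIterable to AsyncIterator matches how these pagers are actually consumed: iterating the pager yields items and fetches subsequent pages lazily. A hedged sketch for the synchronous pager, with a placeholder parent:

from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient

client = BigtableInstanceAdminClient()
pager = client.list_app_profiles(
    parent="projects/my-project/instances/my-instance"
)
for app_profile in pager:  # __iter__ -> Iterator[AppProfile], paging transparently
    print(app_profile.name)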
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
index fa1456714..f86569e0a 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
@@ -15,15 +15,14 @@
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
-import packaging.version
import pkg_resources
import google.auth # type: ignore
-import google.api_core # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.api_core import operations_v1 # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
@@ -43,15 +42,6 @@
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
-try:
- # google.auth.__version__ was added in 1.26.0
- _GOOGLE_AUTH_VERSION = google.auth.__version__
-except AttributeError:
- try: # try pkg_resources if it is available
- _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
- except pkg_resources.DistributionNotFound: # pragma: NO COVER
- _GOOGLE_AUTH_VERSION = None
-
class BigtableInstanceAdminTransport(abc.ABC):
"""Abstract transport class for BigtableInstanceAdmin."""
@@ -109,7 +99,7 @@ def __init__(
host += ":443"
self._host = host
- scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+ scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
@@ -125,7 +115,6 @@ def __init__(
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
-
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
@@ -142,29 +131,6 @@ def __init__(
# Save the credentials.
self._credentials = credentials
- # TODO(busunkim): This method is in the base transport
- # to avoid duplicating code across the transport classes. These functions
- # should be deleted once the minimum required versions of google-auth is increased.
-
- # TODO: Remove this function once google-auth >= 1.25.0 is required
- @classmethod
- def _get_scopes_kwargs(
- cls, host: str, scopes: Optional[Sequence[str]]
- ) -> Dict[str, Optional[Sequence[str]]]:
- """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
- scopes_kwargs = {}
-
- if _GOOGLE_AUTH_VERSION and (
- packaging.version.parse(_GOOGLE_AUTH_VERSION)
- >= packaging.version.parse("1.25.0")
- ):
- scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
- else:
- scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
- return scopes_kwargs
-
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
@@ -282,6 +248,21 @@ def _prep_wrapped_messages(self, client_info):
default_timeout=60.0,
client_info=client_info,
),
+ self.partial_update_cluster: gapic_v1.method.wrap_method(
+ self.partial_update_cluster,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=60.0,
+ multiplier=2,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=60.0,
+ ),
+ default_timeout=60.0,
+ client_info=client_info,
+ ),
self.delete_cluster: gapic_v1.method.wrap_method(
self.delete_cluster, default_timeout=60.0, client_info=client_info,
),
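For reference, the retry policy attached to partial_update_cluster above decodes as follows; a standalone sketch using only public google-api-core pieces (the wrapped callable is hypothetical):

from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries

retry_policy = retries.Retry(
    initial=1.0,    # first backoff delay, in seconds
    maximum=60.0,   # cap on the delay between attempts
    multiplier=2,   # delay grows geometrically
    predicate=retries.if_exception_type(
        core_exceptions.DeadlineExceeded,
        core_exceptions.ServiceUnavailable,
    ),
    deadline=60.0,  # total time budget across all attempts
)

# The policy can wrap any callable; hypothetical usage:
# result = retry_policy(some_rpc_callable)(request)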
@@ -371,8 +352,17 @@ def _prep_wrapped_messages(self, client_info):
),
}
+ def close(self):
+ """Closes resources associated with the transport.
+
+ .. warning::
+ Only call this method if the transport is NOT shared
+ with other clients - this may cause errors in other clients!
+ """
+ raise NotImplementedError()
+
@property
- def operations_client(self) -> operations_v1.OperationsClient:
+ def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@@ -471,6 +461,15 @@ def update_cluster(
]:
raise NotImplementedError()
+ @property
+ def partial_update_cluster(
+ self,
+ ) -> Callable[
+ [bigtable_instance_admin.PartialUpdateClusterRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
@property
def delete_cluster(
self,
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
index 7e2e51611..c477ee926 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
@@ -16,9 +16,9 @@
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import grpc_helpers # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers
+from google.api_core import operations_v1
+from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
@@ -119,7 +119,7 @@ def __init__(
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
- self._operations_client = None
+ self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
@@ -168,8 +168,11 @@ def __init__(
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
+ # use the credentials which are saved
credentials=self._credentials,
- credentials_file=credentials_file,
+ # Set ``credentials_file`` to ``None`` here as
+ # the credentials that we saved earlier should be used.
+ credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
@@ -242,7 +245,7 @@ def operations_client(self) -> operations_v1.OperationsClient:
This property caches on the instance; repeated calls return the same
client.
"""
- # Sanity check: Only create a new client if we do not already have one.
+ # Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
@@ -259,6 +262,12 @@ def create_instance(
Create an instance within a project.
+ Note that exactly one of Cluster.serve_nodes and
+ Cluster.cluster_config.cluster_autoscaling_config can be set. If
+ serve_nodes is set to non-zero, then the cluster is manually
+ scaled. If cluster_config.cluster_autoscaling_config is
+ non-empty, then autoscaling is enabled.
+
Returns:
Callable[[~.CreateInstanceRequest],
~.Operation]:
@@ -425,6 +434,12 @@ def create_cluster(
Creates a cluster within an instance.
+ Note that exactly one of Cluster.serve_nodes and
+ Cluster.cluster_config.cluster_autoscaling_config can be set. If
+ serve_nodes is set to non-zero, then the cluster is manually
+ scaled. If cluster_config.cluster_autoscaling_config is
+ non-empty, then autoscaling is enabled.
+
Returns:
Callable[[~.CreateClusterRequest],
~.Operation]:
@@ -504,6 +519,10 @@ def update_cluster(self) -> Callable[[instance.Cluster], operations_pb2.Operatio
Updates a cluster within an instance.
+ Note that UpdateCluster does not support updating
+ cluster_config.cluster_autoscaling_config. In order to update
+ it, you must use PartialUpdateCluster.
+
Returns:
Callable[[~.Cluster],
~.Operation]:
@@ -522,6 +541,47 @@ def update_cluster(self) -> Callable[[instance.Cluster], operations_pb2.Operatio
)
return self._stubs["update_cluster"]
+ @property
+ def partial_update_cluster(
+ self,
+ ) -> Callable[
+ [bigtable_instance_admin.PartialUpdateClusterRequest], operations_pb2.Operation
+ ]:
+ r"""Return a callable for the partial update cluster method over gRPC.
+
+ Partially updates a cluster within a project. This method is the
+ preferred way to update a Cluster.
+
+ To enable and update autoscaling, set
+ cluster_config.cluster_autoscaling_config. When autoscaling is
+ enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning
+ that updates to it are ignored. Note that an update cannot
+ simultaneously set serve_nodes to non-zero and
+ cluster_config.cluster_autoscaling_config to non-empty, and also
+ specify both in the update_mask.
+
+ To disable autoscaling, clear
+ cluster_config.cluster_autoscaling_config, and explicitly set a
+ serve_node count via the update_mask.
+
+ Returns:
+ Callable[[~.PartialUpdateClusterRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "partial_update_cluster" not in self._stubs:
+ self._stubs["partial_update_cluster"] = self.grpc_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster",
+ request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["partial_update_cluster"]
+
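A hedged sketch of a request this new stub would carry, following the docstring above; the type and field names come from the admin v2 protos in this release, while the resource name and limits are placeholders:

from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin, instance
from google.protobuf import field_mask_pb2

cluster = instance.Cluster(
    name="projects/my-project/instances/my-instance/clusters/my-cluster",
    cluster_config=instance.Cluster.ClusterConfig(
        cluster_autoscaling_config=instance.Cluster.ClusterAutoscalingConfig(
            autoscaling_limits=instance.AutoscalingLimits(
                min_serve_nodes=1, max_serve_nodes=5
            ),
            autoscaling_targets=instance.AutoscalingTargets(
                cpu_utilization_percent=60
            ),
        )
    ),
)

request = bigtable_instance_admin.PartialUpdateClusterRequest(
    cluster=cluster,
    # Only the autoscaling config is updated; serve_nodes is left alone.
    update_mask=field_mask_pb2.FieldMask(
        paths=["cluster_config.cluster_autoscaling_config"]
    ),
)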
@property
def delete_cluster(
self,
@@ -770,5 +830,8 @@ def test_iam_permissions(
)
return self._stubs["test_iam_permissions"]
+ def close(self):
+ self.grpc_channel.close()
+
__all__ = ("BigtableInstanceAdminGrpcTransport",)
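With close() in place, a caller that owns its transport can shut the gRPC channel down deterministically. A minimal usage sketch, assuming the transport is not shared with other clients:

from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient

client = BigtableInstanceAdminClient()
try:
    # ... issue admin RPCs ...
    pass
finally:
    client.transport.close()  # delegates to grpc_channel.close()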
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
index 9eddeaa02..97c8f1ad9 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
@@ -16,12 +16,11 @@
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import grpc_helpers_async # type: ignore
-from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
-import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
@@ -166,7 +165,7 @@ def __init__(
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
- self._operations_client = None
+ self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
@@ -214,8 +213,11 @@ def __init__(
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
+ # use the credentials which are saved
credentials=self._credentials,
- credentials_file=credentials_file,
+ # Set ``credentials_file`` to ``None`` here as
+ # the credentials that we saved earlier should be used.
+ credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
@@ -245,7 +247,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient:
This property caches on the instance; repeated calls return the same
client.
"""
- # Sanity check: Only create a new client if we do not already have one.
+ # Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
@@ -265,6 +267,12 @@ def create_instance(
Create an instance within a project.
+ Note that exactly one of Cluster.serve_nodes and
+ Cluster.cluster_config.cluster_autoscaling_config can be set. If
+ serve_nodes is set to non-zero, then the cluster is manually
+ scaled. If cluster_config.cluster_autoscaling_config is
+ non-empty, then autoscaling is enabled.
+
Returns:
Callable[[~.CreateInstanceRequest],
Awaitable[~.Operation]]:
@@ -439,6 +447,12 @@ def create_cluster(
Creates a cluster within an instance.
+ Note that exactly one of Cluster.serve_nodes and
+ Cluster.cluster_config.cluster_autoscaling_config can be set. If
+ serve_nodes is set to non-zero, then the cluster is manually
+ scaled. If cluster_config.cluster_autoscaling_config is
+ non-empty, then autoscaling is enabled.
+
Returns:
Callable[[~.CreateClusterRequest],
Awaitable[~.Operation]]:
@@ -522,6 +536,10 @@ def update_cluster(
Updates a cluster within an instance.
+ Note that UpdateCluster does not support updating
+ cluster_config.cluster_autoscaling_config. In order to update
+ it, you must use PartialUpdateCluster.
+
Returns:
Callable[[~.Cluster],
Awaitable[~.Operation]]:
@@ -540,6 +558,48 @@ def update_cluster(
)
return self._stubs["update_cluster"]
+ @property
+ def partial_update_cluster(
+ self,
+ ) -> Callable[
+ [bigtable_instance_admin.PartialUpdateClusterRequest],
+ Awaitable[operations_pb2.Operation],
+ ]:
+ r"""Return a callable for the partial update cluster method over gRPC.
+
+ Partially updates a cluster within a project. This method is the
+ preferred way to update a Cluster.
+
+ To enable and update autoscaling, set
+ cluster_config.cluster_autoscaling_config. When autoscaling is
+ enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning
+ that updates to it are ignored. Note that an update cannot
+ simultaneously set serve_nodes to non-zero and
+ cluster_config.cluster_autoscaling_config to non-empty, and also
+ specify both in the update_mask.
+
+ To disable autoscaling, clear
+ cluster_config.cluster_autoscaling_config, and explicitly set a
+ serve_node count via the update_mask.
+
+ Returns:
+ Callable[[~.PartialUpdateClusterRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "partial_update_cluster" not in self._stubs:
+ self._stubs["partial_update_cluster"] = self.grpc_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster",
+ request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["partial_update_cluster"]
+
@property
def delete_cluster(
self,
@@ -796,5 +856,8 @@ def test_iam_permissions(
)
return self._stubs["test_iam_permissions"]
+ def close(self):
+ return self.grpc_channel.close()
+
__all__ = ("BigtableInstanceAdminGrpcAsyncIOTransport",)
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
index 62bef2e7b..303bf2d33 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
@@ -16,16 +16,21 @@
from collections import OrderedDict
import functools
import re
-from typing import Dict, Sequence, Tuple, Type, Union
+from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-import google.api_core.client_options as ClientOptions # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
+
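The try/except above is a version shim: newer google-api-core exposes the ``_MethodDefault`` sentinel type, while older releases do not, so the alias falls back to ``object``. A hedged sketch of how the alias is consumed (the function is hypothetical):

from typing import Union
from google.api_core import gapic_v1
from google.api_core import retry as retries

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # older google-api-core without _MethodDefault
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

def example_rpc(retry: OptionalRetry = gapic_v1.method.DEFAULT) -> None:
    # DEFAULT is a sentinel meaning "use the retry policy configured for
    # this method at the transport level", not "no retry".
    if retry is gapic_v1.method.DEFAULT:
        print("using the per-method default retry policy")
    else:
        print("using caller-supplied retry:", retry)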
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers
@@ -128,6 +133,42 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[ClientOptions] = None
+ ):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise, if a client cert source exists, use the default mTLS endpoint,
+ otherwise use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ return BigtableTableAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
+
@property
def transport(self) -> BigtableTableAdminTransport:
"""Returns the transport used by the client instance.
@@ -191,12 +232,12 @@ def __init__(
async def create_table(
self,
- request: bigtable_table_admin.CreateTableRequest = None,
+ request: Union[bigtable_table_admin.CreateTableRequest, dict] = None,
*,
parent: str = None,
table_id: str = None,
table: gba_table.Table = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gba_table.Table:
@@ -205,7 +246,7 @@ async def create_table(
column families, specified in the request.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.CreateTableRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable]
parent (:class:`str`):
@@ -245,7 +286,7 @@ async def create_table(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, table_id, table])
if request is not None and has_flattened_params:
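The check above enforces the calling convention for every method in this file: pass either a full request object (now also accepted as a plain dict) or the flattened fields, never both. A hedged sketch:

from google.cloud.bigtable_admin_v2.types import bigtable_table_admin

# OK: a full request object.
request = bigtable_table_admin.CreateTableRequest(
    parent="projects/my-project/instances/my-instance", table_id="my-table"
)
# OK: flattened params instead of a request object:
# client.create_table(parent=..., table_id="my-table", table=my_table)
# Error: mixing both raises ValueError:
# client.create_table(request=request, table_id="my-table")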
@@ -287,12 +328,14 @@ async def create_table(
async def create_table_from_snapshot(
self,
- request: bigtable_table_admin.CreateTableFromSnapshotRequest = None,
+ request: Union[
+ bigtable_table_admin.CreateTableFromSnapshotRequest, dict
+ ] = None,
*,
parent: str = None,
table_id: str = None,
source_snapshot: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
@@ -307,7 +350,7 @@ async def create_table_from_snapshot(
SLA or deprecation policy.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot]
Note: This is a private alpha release of Cloud Bigtable
@@ -357,7 +400,7 @@ async def create_table_from_snapshot(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, table_id, source_snapshot])
if request is not None and has_flattened_params:
@@ -407,17 +450,17 @@ async def create_table_from_snapshot(
async def list_tables(
self,
- request: bigtable_table_admin.ListTablesRequest = None,
+ request: Union[bigtable_table_admin.ListTablesRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTablesAsyncPager:
r"""Lists all tables served from a specified instance.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.ListTablesRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
parent (:class:`str`):
@@ -444,7 +487,7 @@ async def list_tables(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -498,17 +541,17 @@ async def list_tables(
async def get_table(
self,
- request: bigtable_table_admin.GetTableRequest = None,
+ request: Union[bigtable_table_admin.GetTableRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> table.Table:
r"""Gets metadata information about the specified table.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.GetTableRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable]
name (:class:`str`):
@@ -534,7 +577,7 @@ async def get_table(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -582,10 +625,10 @@ async def get_table(
async def delete_table(
self,
- request: bigtable_table_admin.DeleteTableRequest = None,
+ request: Union[bigtable_table_admin.DeleteTableRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
@@ -593,7 +636,7 @@ async def delete_table(
data.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.DeleteTableRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable]
name (:class:`str`):
@@ -611,7 +654,7 @@ async def delete_table(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -648,13 +691,13 @@ async def delete_table(
async def modify_column_families(
self,
- request: bigtable_table_admin.ModifyColumnFamiliesRequest = None,
+ request: Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] = None,
*,
name: str = None,
modifications: Sequence[
bigtable_table_admin.ModifyColumnFamiliesRequest.Modification
] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> table.Table:
@@ -665,7 +708,7 @@ async def modify_column_families(
table where only some modifications have taken effect.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies]
name (:class:`str`):
@@ -703,7 +746,7 @@ async def modify_column_families(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, modifications])
if request is not None and has_flattened_params:
@@ -743,9 +786,9 @@ async def modify_column_families(
async def drop_row_range(
self,
- request: bigtable_table_admin.DropRowRangeRequest = None,
+ request: Union[bigtable_table_admin.DropRowRangeRequest, dict] = None,
*,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
@@ -755,7 +798,7 @@ async def drop_row_range(
prefix.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.DropRowRangeRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange]
retry (google.api_core.retry.Retry): Designation of what errors, if any,
@@ -788,10 +831,12 @@ async def drop_row_range(
async def generate_consistency_token(
self,
- request: bigtable_table_admin.GenerateConsistencyTokenRequest = None,
+ request: Union[
+ bigtable_table_admin.GenerateConsistencyTokenRequest, dict
+ ] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable_table_admin.GenerateConsistencyTokenResponse:
@@ -802,7 +847,7 @@ async def generate_consistency_token(
days.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken]
name (:class:`str`):
@@ -826,7 +871,7 @@ async def generate_consistency_token(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -874,11 +919,11 @@ async def generate_consistency_token(
async def check_consistency(
self,
- request: bigtable_table_admin.CheckConsistencyRequest = None,
+ request: Union[bigtable_table_admin.CheckConsistencyRequest, dict] = None,
*,
name: str = None,
consistency_token: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable_table_admin.CheckConsistencyResponse:
@@ -888,7 +933,7 @@ async def check_consistency(
request.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency]
name (:class:`str`):
@@ -919,7 +964,7 @@ async def check_consistency(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, consistency_token])
if request is not None and has_flattened_params:
@@ -969,13 +1014,13 @@ async def check_consistency(
async def snapshot_table(
self,
- request: bigtable_table_admin.SnapshotTableRequest = None,
+ request: Union[bigtable_table_admin.SnapshotTableRequest, dict] = None,
*,
name: str = None,
cluster: str = None,
snapshot_id: str = None,
description: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
@@ -990,7 +1035,7 @@ async def snapshot_table(
SLA or deprecation policy.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.SnapshotTableRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable]
Note: This is a private alpha release of Cloud Bigtable
@@ -1053,7 +1098,7 @@ async def snapshot_table(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, cluster, snapshot_id, description])
if request is not None and has_flattened_params:
@@ -1105,10 +1150,10 @@ async def snapshot_table(
async def get_snapshot(
self,
- request: bigtable_table_admin.GetSnapshotRequest = None,
+ request: Union[bigtable_table_admin.GetSnapshotRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> table.Snapshot:
@@ -1122,7 +1167,7 @@ async def get_snapshot(
SLA or deprecation policy.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.GetSnapshotRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot]
Note: This is a private alpha release of Cloud Bigtable
@@ -1162,7 +1207,7 @@ async def get_snapshot(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1210,10 +1255,10 @@ async def get_snapshot(
async def list_snapshots(
self,
- request: bigtable_table_admin.ListSnapshotsRequest = None,
+ request: Union[bigtable_table_admin.ListSnapshotsRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSnapshotsAsyncPager:
@@ -1227,7 +1272,7 @@ async def list_snapshots(
SLA or deprecation policy.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots]
Note: This is a private alpha release of Cloud Bigtable
@@ -1270,7 +1315,7 @@ async def list_snapshots(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -1324,10 +1369,10 @@ async def list_snapshots(
async def delete_snapshot(
self,
- request: bigtable_table_admin.DeleteSnapshotRequest = None,
+ request: Union[bigtable_table_admin.DeleteSnapshotRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
@@ -1340,7 +1385,7 @@ async def delete_snapshot(
SLA or deprecation policy.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot]
Note: This is a private alpha release of Cloud Bigtable
@@ -1364,7 +1409,7 @@ async def delete_snapshot(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1401,12 +1446,12 @@ async def delete_snapshot(
async def create_backup(
self,
- request: bigtable_table_admin.CreateBackupRequest = None,
+ request: Union[bigtable_table_admin.CreateBackupRequest, dict] = None,
*,
parent: str = None,
backup_id: str = None,
backup: table.Backup = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
@@ -1421,7 +1466,7 @@ async def create_backup(
delete the backup.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.CreateBackupRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]):
The request object. The request for
[CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup].
parent (:class:`str`):
@@ -1466,7 +1511,7 @@ async def create_backup(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, backup_id, backup])
if request is not None and has_flattened_params:
@@ -1516,10 +1561,10 @@ async def create_backup(
async def get_backup(
self,
- request: bigtable_table_admin.GetBackupRequest = None,
+ request: Union[bigtable_table_admin.GetBackupRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> table.Backup:
@@ -1527,7 +1572,7 @@ async def get_backup(
Bigtable Backup.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.GetBackupRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]):
The request object. The request for
[GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup].
name (:class:`str`):
@@ -1548,7 +1593,7 @@ async def get_backup(
A backup of a Cloud Bigtable table.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1596,18 +1641,18 @@ async def get_backup(
async def update_backup(
self,
- request: bigtable_table_admin.UpdateBackupRequest = None,
+ request: Union[bigtable_table_admin.UpdateBackupRequest, dict] = None,
*,
backup: table.Backup = None,
update_mask: field_mask_pb2.FieldMask = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> table.Backup:
r"""Updates a pending or completed Cloud Bigtable Backup.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.UpdateBackupRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]):
The request object. The request for
[UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup].
backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`):
@@ -1644,7 +1689,7 @@ async def update_backup(
A backup of a Cloud Bigtable table.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([backup, update_mask])
if request is not None and has_flattened_params:
@@ -1686,17 +1731,17 @@ async def update_backup(
async def delete_backup(
self,
- request: bigtable_table_admin.DeleteBackupRequest = None,
+ request: Union[bigtable_table_admin.DeleteBackupRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a pending or completed Cloud Bigtable backup.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.DeleteBackupRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]):
The request object. The request for
[DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup].
name (:class:`str`):
@@ -1714,7 +1759,7 @@ async def delete_backup(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1751,10 +1796,10 @@ async def delete_backup(
async def list_backups(
self,
- request: bigtable_table_admin.ListBackupsRequest = None,
+ request: Union[bigtable_table_admin.ListBackupsRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListBackupsAsyncPager:
@@ -1762,7 +1807,7 @@ async def list_backups(
and pending backups.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.ListBackupsRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]):
The request object. The request for
[ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
parent (:class:`str`):
@@ -1792,7 +1837,7 @@ async def list_backups(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -1846,9 +1891,9 @@ async def list_backups(
async def restore_table(
self,
- request: bigtable_table_admin.RestoreTableRequest = None,
+ request: Union[bigtable_table_admin.RestoreTableRequest, dict] = None,
*,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
@@ -1863,7 +1908,7 @@ async def restore_table(
[Table][google.bigtable.admin.v2.Table], if successful.
Args:
- request (:class:`google.cloud.bigtable_admin_v2.types.RestoreTableRequest`):
+ request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]):
The request object. The request for
[RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
@@ -1914,10 +1959,10 @@ async def restore_table(
async def get_iam_policy(
self,
- request: iam_policy_pb2.GetIamPolicyRequest = None,
+ request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None,
*,
resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
@@ -1926,7 +1971,7 @@ async def get_iam_policy(
but does not have a policy set.
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`):
+ request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
The request object. Request message for `GetIamPolicy`
method.
resource (:class:`str`):
@@ -2004,7 +2049,7 @@ async def get_iam_policy(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
@@ -2052,10 +2097,10 @@ async def get_iam_policy(
async def set_iam_policy(
self,
- request: iam_policy_pb2.SetIamPolicyRequest = None,
+ request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None,
*,
resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
@@ -2063,7 +2108,7 @@ async def set_iam_policy(
resource. Replaces any existing policy.
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`):
+ request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
The request object. Request message for `SetIamPolicy`
method.
resource (:class:`str`):
@@ -2141,7 +2186,7 @@ async def set_iam_policy(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
@@ -2179,11 +2224,11 @@ async def set_iam_policy(
async def test_iam_permissions(
self,
- request: iam_policy_pb2.TestIamPermissionsRequest = None,
+ request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None,
*,
resource: str = None,
permissions: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
@@ -2191,7 +2236,7 @@ async def test_iam_permissions(
specified Table or Backup resource.
Args:
- request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`):
+ request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
The request object. Request message for
`TestIamPermissions` method.
resource (:class:`str`):
@@ -2223,7 +2268,7 @@ async def test_iam_permissions(
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource, permissions])
if request is not None and has_flattened_params:
@@ -2271,6 +2316,12 @@ async def test_iam_permissions(
# Done; return the response.
return response
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
+
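The new dunder pair makes the async client usable as an async context manager, awaiting transport.close() on exit. A usage sketch with placeholder resource names:

import asyncio
from google.cloud.bigtable_admin_v2 import BigtableTableAdminAsyncClient

async def main():
    async with BigtableTableAdminAsyncClient() as client:
        pager = await client.list_tables(
            parent="projects/my-project/instances/my-instance"
        )
        async for table in pager:
            print(table.name)
    # __aexit__ has awaited transport.close() here.

# asyncio.run(main())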
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
index 8c891fd87..070423018 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
@@ -14,22 +14,26 @@
# limitations under the License.
#
from collections import OrderedDict
-from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
+
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers
@@ -332,6 +336,73 @@ def parse_common_location_path(path: str) -> Dict[str, str]:
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
+ ):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise, if a client cert source exists, use the default mTLS endpoint,
+ otherwise use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_client_cert not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError(
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Figure out the client cert source to use.
+ client_cert_source = None
+ if use_client_cert == "true":
+ if client_options.client_cert_source:
+ client_cert_source = client_options.client_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ elif use_mtls_endpoint == "always" or (
+ use_mtls_endpoint == "auto" and client_cert_source
+ ):
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = cls.DEFAULT_ENDPOINT
+
+ return api_endpoint, client_cert_source
+
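A hedged sketch of how this resolution behaves; the endpoint value is illustrative and the environment mutations are for demonstration only:

import os
from google.api_core.client_options import ClientOptions
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

# Default: no client cert, regular endpoint.
endpoint, cert_source = BigtableTableAdminClient.get_mtls_endpoint_and_cert_source()

# Force the mTLS endpoint regardless of cert availability.
os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "always"
endpoint, cert_source = BigtableTableAdminClient.get_mtls_endpoint_and_cert_source()

# An explicit api_endpoint always wins.
opts = ClientOptions(api_endpoint="private.bigtableadmin.example.com")
endpoint, cert_source = BigtableTableAdminClient.get_mtls_endpoint_and_cert_source(opts)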
def __init__(
self,
*,
@@ -382,50 +453,22 @@ def __init__(
if client_options is None:
client_options = client_options_lib.ClientOptions()
- # Create SSL credentials for mutual TLS if needed.
- use_client_cert = bool(
- util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
+ client_options
)
- client_cert_source_func = None
- is_mtls = False
- if use_client_cert:
- if client_options.client_cert_source:
- is_mtls = True
- client_cert_source_func = client_options.client_cert_source
- else:
- is_mtls = mtls.has_default_client_cert_source()
- if is_mtls:
- client_cert_source_func = mtls.default_client_cert_source()
- else:
- client_cert_source_func = None
-
- # Figure out which api endpoint to use.
- if client_options.api_endpoint is not None:
- api_endpoint = client_options.api_endpoint
- else:
- use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_mtls_env == "never":
- api_endpoint = self.DEFAULT_ENDPOINT
- elif use_mtls_env == "always":
- api_endpoint = self.DEFAULT_MTLS_ENDPOINT
- elif use_mtls_env == "auto":
- if is_mtls:
- api_endpoint = self.DEFAULT_MTLS_ENDPOINT
- else:
- api_endpoint = self.DEFAULT_ENDPOINT
- else:
- raise MutualTLSChannelError(
- "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
- "values: never, auto, always"
- )
+ api_key_value = getattr(client_options, "api_key", None)
+ if api_key_value and credentials:
+ raise ValueError(
+ "client_options.api_key and credentials are mutually exclusive"
+ )
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, BigtableTableAdminTransport):
# transport is a BigtableTableAdminTransport instance.
- if credentials or client_options.credentials_file:
+ if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
@@ -437,6 +480,15 @@ def __init__(
)
self._transport = transport
else:
+ import google.auth._default # type: ignore
+
+ if api_key_value and hasattr(
+ google.auth._default, "get_api_key_credentials"
+ ):
+ credentials = google.auth._default.get_api_key_credentials(
+ api_key_value
+ )
+
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
@@ -446,10 +498,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
- always_use_jwt_access=(
- Transport == type(self).get_transport_class("grpc")
- or Transport == type(self).get_transport_class("grpc_asyncio")
- ),
+ always_use_jwt_access=True,
)
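Two behavioral changes land in this constructor: API-key support (taken when ``client_options.api_key`` is set and the installed google-auth exposes ``get_api_key_credentials``) and ``always_use_jwt_access=True`` for every transport rather than only the gRPC ones. A hedged usage sketch of the API-key path, assuming a google-api-core whose ClientOptions accepts ``api_key``:

from google.api_core.client_options import ClientOptions
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

# Placeholder key; mutually exclusive with explicit ``credentials``.
options = ClientOptions(api_key="my-api-key")
client = BigtableTableAdminClient(client_options=options)

# Passing both an api_key and credentials raises
# ValueError("client_options.api_key and credentials are mutually exclusive").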
def create_table(
@@ -459,7 +508,7 @@ def create_table(
parent: str = None,
table_id: str = None,
table: gba_table.Table = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gba_table.Table:
@@ -508,7 +557,7 @@ def create_table(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, table_id, table])
if request is not None and has_flattened_params:
@@ -557,7 +606,7 @@ def create_table_from_snapshot(
parent: str = None,
table_id: str = None,
source_snapshot: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
@@ -622,7 +671,7 @@ def create_table_from_snapshot(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, table_id, source_snapshot])
if request is not None and has_flattened_params:
@@ -677,7 +726,7 @@ def list_tables(
request: Union[bigtable_table_admin.ListTablesRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTablesPager:
@@ -711,7 +760,7 @@ def list_tables(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -758,7 +807,7 @@ def get_table(
request: Union[bigtable_table_admin.GetTableRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> table.Table:
@@ -791,7 +840,7 @@ def get_table(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -832,7 +881,7 @@ def delete_table(
request: Union[bigtable_table_admin.DeleteTableRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
@@ -858,7 +907,7 @@ def delete_table(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -901,7 +950,7 @@ def modify_column_families(
modifications: Sequence[
bigtable_table_admin.ModifyColumnFamiliesRequest.Modification
] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> table.Table:
@@ -950,7 +999,7 @@ def modify_column_families(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, modifications])
if request is not None and has_flattened_params:
@@ -992,7 +1041,7 @@ def drop_row_range(
self,
request: Union[bigtable_table_admin.DropRowRangeRequest, dict] = None,
*,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
@@ -1041,7 +1090,7 @@ def generate_consistency_token(
] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable_table_admin.GenerateConsistencyTokenResponse:
@@ -1076,7 +1125,7 @@ def generate_consistency_token(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1122,7 +1171,7 @@ def check_consistency(
*,
name: str = None,
consistency_token: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable_table_admin.CheckConsistencyResponse:
@@ -1163,7 +1212,7 @@ def check_consistency(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, consistency_token])
if request is not None and has_flattened_params:
@@ -1209,7 +1258,7 @@ def snapshot_table(
cluster: str = None,
snapshot_id: str = None,
description: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
@@ -1287,7 +1336,7 @@ def snapshot_table(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, cluster, snapshot_id, description])
if request is not None and has_flattened_params:
@@ -1342,7 +1391,7 @@ def get_snapshot(
request: Union[bigtable_table_admin.GetSnapshotRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> table.Snapshot:
@@ -1396,7 +1445,7 @@ def get_snapshot(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1437,7 +1486,7 @@ def list_snapshots(
request: Union[bigtable_table_admin.ListSnapshotsRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSnapshotsPager:
@@ -1494,7 +1543,7 @@ def list_snapshots(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -1541,7 +1590,7 @@ def delete_snapshot(
request: Union[bigtable_table_admin.DeleteSnapshotRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
@@ -1578,7 +1627,7 @@ def delete_snapshot(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1620,7 +1669,7 @@ def create_backup(
parent: str = None,
backup_id: str = None,
backup: table.Backup = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
@@ -1680,7 +1729,7 @@ def create_backup(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, backup_id, backup])
if request is not None and has_flattened_params:
@@ -1733,7 +1782,7 @@ def get_backup(
request: Union[bigtable_table_admin.GetBackupRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> table.Backup:
@@ -1762,7 +1811,7 @@ def get_backup(
A backup of a Cloud Bigtable table.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1804,7 +1853,7 @@ def update_backup(
*,
backup: table.Backup = None,
update_mask: field_mask_pb2.FieldMask = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> table.Backup:
@@ -1848,7 +1897,7 @@ def update_backup(
A backup of a Cloud Bigtable table.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([backup, update_mask])
if request is not None and has_flattened_params:
@@ -1893,7 +1942,7 @@ def delete_backup(
request: Union[bigtable_table_admin.DeleteBackupRequest, dict] = None,
*,
name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
@@ -1918,7 +1967,7 @@ def delete_backup(
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
@@ -1958,7 +2007,7 @@ def list_backups(
request: Union[bigtable_table_admin.ListBackupsRequest, dict] = None,
*,
parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListBackupsPager:
@@ -1996,7 +2045,7 @@ def list_backups(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
@@ -2042,7 +2091,7 @@ def restore_table(
self,
request: Union[bigtable_table_admin.RestoreTableRequest, dict] = None,
*,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
@@ -2112,7 +2161,7 @@ def get_iam_policy(
request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None,
*,
resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
@@ -2199,7 +2248,7 @@ def get_iam_policy(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
@@ -2239,7 +2288,7 @@ def set_iam_policy(
request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None,
*,
resource: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
@@ -2325,7 +2374,7 @@ def set_iam_policy(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
@@ -2366,7 +2415,7 @@ def test_iam_permissions(
*,
resource: str = None,
permissions: Sequence[str] = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
@@ -2406,7 +2455,7 @@ def test_iam_permissions(
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource, permissions])
if request is not None and has_flattened_params:
@@ -2443,6 +2492,19 @@ def test_iam_permissions(
# Done; return the response.
return response
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
+ """
+ self.transport.close()
+
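Reviewer note: a minimal sketch of how the new context-manager support is meant to be used, with placeholder resource IDs. Exiting the block closes the transport, so the client must own it exclusively.

```python
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

# The with-block owns the transport exclusively; __exit__ closes it.
with BigtableTableAdminClient() as client:
    table = client.get_table(
        name="projects/my-project/instances/my-instance/tables/my-table"
    )
```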
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py
index 84ead0192..07e82255a 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py
@@ -15,13 +15,13 @@
#
from typing import (
Any,
- AsyncIterable,
+ AsyncIterator,
Awaitable,
Callable,
- Iterable,
Sequence,
Tuple,
Optional,
+ Iterator,
)
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
@@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[bigtable_table_admin.ListTablesResponse]:
+ def pages(self) -> Iterator[bigtable_table_admin.ListTablesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
- def __iter__(self) -> Iterable[table.Table]:
+ def __iter__(self) -> Iterator[table.Table]:
for page in self.pages:
yield from page.tables
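The switch from `Iterable` to `Iterator` is annotation-only; consumption is unchanged. A sketch, assuming the admin client from above and placeholder IDs:

```python
# __iter__ flattens pages into individual Table messages.
for t in client.list_tables(parent="projects/my-project/instances/my-instance"):
    print(t.name)

# .pages yields one ListTablesResponse per underlying RPC.
pager = client.list_tables(parent="projects/my-project/instances/my-instance")
for page in pager.pages:
    print(len(page.tables))
```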
@@ -137,14 +137,14 @@ def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- async def pages(self) -> AsyncIterable[bigtable_table_admin.ListTablesResponse]:
+ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListTablesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
- def __aiter__(self) -> AsyncIterable[table.Table]:
+ def __aiter__(self) -> AsyncIterator[table.Table]:
async def async_generator():
async for page in self.pages:
for response in page.tables:
@@ -203,14 +203,14 @@ def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[bigtable_table_admin.ListSnapshotsResponse]:
+ def pages(self) -> Iterator[bigtable_table_admin.ListSnapshotsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
- def __iter__(self) -> Iterable[table.Snapshot]:
+ def __iter__(self) -> Iterator[table.Snapshot]:
for page in self.pages:
yield from page.snapshots
@@ -265,14 +265,14 @@ def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- async def pages(self) -> AsyncIterable[bigtable_table_admin.ListSnapshotsResponse]:
+ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListSnapshotsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
- def __aiter__(self) -> AsyncIterable[table.Snapshot]:
+ def __aiter__(self) -> AsyncIterator[table.Snapshot]:
async def async_generator():
async for page in self.pages:
for response in page.snapshots:
@@ -331,14 +331,14 @@ def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- def pages(self) -> Iterable[bigtable_table_admin.ListBackupsResponse]:
+ def pages(self) -> Iterator[bigtable_table_admin.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
- def __iter__(self) -> Iterable[table.Backup]:
+ def __iter__(self) -> Iterator[table.Backup]:
for page in self.pages:
yield from page.backups
@@ -393,14 +393,14 @@ def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
- async def pages(self) -> AsyncIterable[bigtable_table_admin.ListBackupsResponse]:
+ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
- def __aiter__(self) -> AsyncIterable[table.Backup]:
+ def __aiter__(self) -> AsyncIterator[table.Backup]:
async def async_generator():
async for page in self.pages:
for response in page.backups:
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
index e136c81c6..e8937e539 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
@@ -15,15 +15,14 @@
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
-import packaging.version
import pkg_resources
import google.auth # type: ignore
-import google.api_core # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.api_core import operations_v1 # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
@@ -44,15 +43,6 @@
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
-try:
- # google.auth.__version__ was added in 1.26.0
- _GOOGLE_AUTH_VERSION = google.auth.__version__
-except AttributeError:
- try: # try pkg_resources if it is available
- _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
- except pkg_resources.DistributionNotFound: # pragma: NO COVER
- _GOOGLE_AUTH_VERSION = None
-
class BigtableTableAdminTransport(abc.ABC):
"""Abstract transport class for BigtableTableAdmin."""
@@ -109,7 +99,7 @@ def __init__(
host += ":443"
self._host = host
- scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+ scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
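With the version shim removed, the transport assumes google-auth >= 1.25.0, which accepts `default_scopes`. Roughly equivalent to the following sketch (the scope URL is illustrative):

```python
import google.auth

scopes_kwargs = {
    "scopes": None,  # user-supplied scopes, when set, take precedence
    "default_scopes": ("https://www.googleapis.com/auth/bigtable.admin",),
}
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=None)
```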
@@ -125,7 +115,6 @@ def __init__(
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
-
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
@@ -142,29 +131,6 @@ def __init__(
# Save the credentials.
self._credentials = credentials
- # TODO(busunkim): This method is in the base transport
- # to avoid duplicating code across the transport classes. These functions
- # should be deleted once the minimum required versions of google-auth is increased.
-
- # TODO: Remove this function once google-auth >= 1.25.0 is required
- @classmethod
- def _get_scopes_kwargs(
- cls, host: str, scopes: Optional[Sequence[str]]
- ) -> Dict[str, Optional[Sequence[str]]]:
- """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
- scopes_kwargs = {}
-
- if _GOOGLE_AUTH_VERSION and (
- packaging.version.parse(_GOOGLE_AUTH_VERSION)
- >= packaging.version.parse("1.25.0")
- ):
- scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
- else:
- scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
- return scopes_kwargs
-
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
@@ -360,8 +326,17 @@ def _prep_wrapped_messages(self, client_info):
),
}
+ def close(self):
+ """Closes resources associated with the transport.
+
+ .. warning::
+ Only call this method if the transport is NOT shared
+ with other clients - this may cause errors in other clients!
+ """
+ raise NotImplementedError()
+
@property
- def operations_client(self) -> operations_v1.OperationsClient:
+ def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
index 37ecdb039..906d6b13d 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
@@ -16,9 +16,9 @@
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import grpc_helpers # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers
+from google.api_core import operations_v1
+from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
@@ -121,7 +121,7 @@ def __init__(
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
- self._operations_client = None
+ self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
@@ -170,8 +170,11 @@ def __init__(
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
+ # Use the credentials that were saved earlier.
credentials=self._credentials,
- credentials_file=credentials_file,
+ # Set ``credentials_file`` to ``None`` here as
+ # the credentials that we saved earlier should be used.
+ credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
@@ -244,7 +247,7 @@ def operations_client(self) -> operations_v1.OperationsClient:
This property caches on the instance; repeated calls return the same
client.
"""
- # Sanity check: Only create a new client if we do not already have one.
+ # Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
@@ -920,5 +923,8 @@ def test_iam_permissions(
)
return self._stubs["test_iam_permissions"]
+ def close(self):
+ self.grpc_channel.close()
+
__all__ = ("BigtableTableAdminGrpcTransport",)
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
index e797ff875..790568cee 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
@@ -16,12 +16,11 @@
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import grpc_helpers_async # type: ignore
-from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
-import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
@@ -168,7 +167,7 @@ def __init__(
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
- self._operations_client = None
+ self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
@@ -216,8 +215,11 @@ def __init__(
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
+ # Use the credentials that were saved earlier.
credentials=self._credentials,
- credentials_file=credentials_file,
+ # Set ``credentials_file`` to ``None`` here as
+ # the credentials that we saved earlier should be used.
+ credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
@@ -247,7 +249,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient:
This property caches on the instance; repeated calls return the same
client.
"""
- # Sanity check: Only create a new client if we do not already have one.
+ # Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
@@ -942,5 +944,8 @@ def test_iam_permissions(
)
return self._stubs["test_iam_permissions"]
+ def close(self):
+ return self.grpc_channel.close()
+
__all__ = ("BigtableTableAdminGrpcAsyncIOTransport",)
diff --git a/google/cloud/bigtable_admin_v2/types/__init__.py b/google/cloud/bigtable_admin_v2/types/__init__.py
index aeeed3466..d1e4c8f1c 100644
--- a/google/cloud/bigtable_admin_v2/types/__init__.py
+++ b/google/cloud/bigtable_admin_v2/types/__init__.py
@@ -31,6 +31,8 @@
ListClustersResponse,
ListInstancesRequest,
ListInstancesResponse,
+ PartialUpdateClusterMetadata,
+ PartialUpdateClusterRequest,
PartialUpdateInstanceRequest,
UpdateAppProfileMetadata,
UpdateAppProfileRequest,
@@ -74,6 +76,8 @@
)
from .instance import (
AppProfile,
+ AutoscalingLimits,
+ AutoscalingTargets,
Cluster,
Instance,
)
@@ -107,6 +111,8 @@
"ListClustersResponse",
"ListInstancesRequest",
"ListInstancesResponse",
+ "PartialUpdateClusterMetadata",
+ "PartialUpdateClusterRequest",
"PartialUpdateInstanceRequest",
"UpdateAppProfileMetadata",
"UpdateAppProfileRequest",
@@ -144,6 +150,8 @@
"OperationProgress",
"StorageType",
"AppProfile",
+ "AutoscalingLimits",
+ "AutoscalingTargets",
"Cluster",
"Instance",
"Backup",
diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py
index 69b251f65..842b0e5fe 100644
--- a/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py
+++ b/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py
@@ -38,6 +38,8 @@
"UpdateInstanceMetadata",
"CreateClusterMetadata",
"UpdateClusterMetadata",
+ "PartialUpdateClusterMetadata",
+ "PartialUpdateClusterRequest",
"CreateAppProfileRequest",
"GetAppProfileRequest",
"ListAppProfilesRequest",
@@ -51,6 +53,7 @@
class CreateInstanceRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.CreateInstance.
+
Attributes:
parent (str):
Required. The unique name of the project in which to create
@@ -82,6 +85,7 @@ class CreateInstanceRequest(proto.Message):
class GetInstanceRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.GetInstance.
+
Attributes:
name (str):
Required. The unique name of the requested instance. Values
@@ -93,6 +97,7 @@ class GetInstanceRequest(proto.Message):
class ListInstancesRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.ListInstances.
+
Attributes:
parent (str):
Required. The unique name of the project for which a list of
@@ -108,6 +113,7 @@ class ListInstancesRequest(proto.Message):
class ListInstancesResponse(proto.Message):
r"""Response message for BigtableInstanceAdmin.ListInstances.
+
Attributes:
instances (Sequence[google.cloud.bigtable_admin_v2.types.Instance]):
The list of requested instances.
@@ -155,6 +161,7 @@ class PartialUpdateInstanceRequest(proto.Message):
class DeleteInstanceRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.DeleteInstance.
+
Attributes:
name (str):
Required. The unique name of the instance to be deleted.
@@ -167,6 +174,7 @@ class DeleteInstanceRequest(proto.Message):
class CreateClusterRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.CreateCluster.
+
Attributes:
parent (str):
Required. The unique name of the instance in which to create
@@ -189,6 +197,7 @@ class CreateClusterRequest(proto.Message):
class GetClusterRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.GetCluster.
+
Attributes:
name (str):
Required. The unique name of the requested cluster. Values
@@ -201,6 +210,7 @@ class GetClusterRequest(proto.Message):
class ListClustersRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.ListClusters.
+
Attributes:
parent (str):
Required. The unique name of the instance for which a list
@@ -218,6 +228,7 @@ class ListClustersRequest(proto.Message):
class ListClustersResponse(proto.Message):
r"""Response message for BigtableInstanceAdmin.ListClusters.
+
Attributes:
clusters (Sequence[google.cloud.bigtable_admin_v2.types.Cluster]):
The list of requested clusters.
@@ -245,6 +256,7 @@ def raw_page(self):
class DeleteClusterRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.DeleteCluster.
+
Attributes:
name (str):
Required. The unique name of the cluster to be deleted.
@@ -257,6 +269,7 @@ class DeleteClusterRequest(proto.Message):
class CreateInstanceMetadata(proto.Message):
r"""The metadata for the Operation returned by CreateInstance.
+
Attributes:
original_request (google.cloud.bigtable_admin_v2.types.CreateInstanceRequest):
The request that prompted the initiation of
@@ -280,6 +293,7 @@ class CreateInstanceMetadata(proto.Message):
class UpdateInstanceMetadata(proto.Message):
r"""The metadata for the Operation returned by UpdateInstance.
+
Attributes:
original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest):
The request that prompted the initiation of
@@ -303,6 +317,7 @@ class UpdateInstanceMetadata(proto.Message):
class CreateClusterMetadata(proto.Message):
r"""The metadata for the Operation returned by CreateCluster.
+
Attributes:
original_request (google.cloud.bigtable_admin_v2.types.CreateClusterRequest):
The request that prompted the initiation of
@@ -326,6 +341,7 @@ class CreateClusterMetadata(proto.Message):
class UpdateClusterMetadata(proto.Message):
r"""The metadata for the Operation returned by UpdateCluster.
+
Attributes:
original_request (google.cloud.bigtable_admin_v2.types.Cluster):
The request that prompted the initiation of
@@ -347,8 +363,53 @@ class UpdateClusterMetadata(proto.Message):
finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
+class PartialUpdateClusterMetadata(proto.Message):
+ r"""The metadata for the Operation returned by
+ PartialUpdateCluster.
+
+ Attributes:
+ request_time (google.protobuf.timestamp_pb2.Timestamp):
+ The time at which the original request was
+ received.
+ finish_time (google.protobuf.timestamp_pb2.Timestamp):
+ The time at which the operation failed or was
+ completed successfully.
+ original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest):
+ The original request for
+ PartialUpdateCluster.
+ """
+
+ request_time = proto.Field(
+ proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,
+ )
+ finish_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
+ original_request = proto.Field(
+ proto.MESSAGE, number=3, message="PartialUpdateClusterRequest",
+ )
+
+
+class PartialUpdateClusterRequest(proto.Message):
+ r"""Request message for
+ BigtableInstanceAdmin.PartialUpdateCluster.
+
+ Attributes:
+ cluster (google.cloud.bigtable_admin_v2.types.Cluster):
+ Required. The Cluster which contains the partial updates to
+ be applied, subject to the update_mask.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The subset of Cluster fields which
+ should be replaced.
+ """
+
+ cluster = proto.Field(proto.MESSAGE, number=1, message=gba_instance.Cluster,)
+ update_mask = proto.Field(
+ proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
+ )
+
+
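A hedged sketch of constructing the new request type; the resource name and node count are placeholders:

```python
from google.cloud.bigtable_admin_v2 import types
from google.protobuf import field_mask_pb2

# Only the fields named in update_mask are replaced on the server.
request = types.PartialUpdateClusterRequest(
    cluster=types.Cluster(
        name="projects/p/instances/i/clusters/c",
        serve_nodes=5,
    ),
    update_mask=field_mask_pb2.FieldMask(paths=["serve_nodes"]),
)
```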
class CreateAppProfileRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.CreateAppProfile.
+
Attributes:
parent (str):
Required. The unique name of the instance in which to create
@@ -375,6 +436,7 @@ class CreateAppProfileRequest(proto.Message):
class GetAppProfileRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.GetAppProfile.
+
Attributes:
name (str):
Required. The unique name of the requested app profile.
@@ -387,6 +449,7 @@ class GetAppProfileRequest(proto.Message):
class ListAppProfilesRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.ListAppProfiles.
+
Attributes:
parent (str):
Required. The unique name of the instance for which a list
@@ -418,6 +481,7 @@ class ListAppProfilesRequest(proto.Message):
class ListAppProfilesResponse(proto.Message):
r"""Response message for BigtableInstanceAdmin.ListAppProfiles.
+
Attributes:
app_profiles (Sequence[google.cloud.bigtable_admin_v2.types.AppProfile]):
The list of requested app profiles.
@@ -446,6 +510,7 @@ def raw_page(self):
class UpdateAppProfileRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.UpdateAppProfile.
+
Attributes:
app_profile (google.cloud.bigtable_admin_v2.types.AppProfile):
Required. The app profile which will
@@ -468,6 +533,7 @@ class UpdateAppProfileRequest(proto.Message):
class DeleteAppProfileRequest(proto.Message):
r"""Request message for BigtableInstanceAdmin.DeleteAppProfile.
+
Attributes:
name (str):
Required. The unique name of the app profile to be deleted.
@@ -483,7 +549,8 @@ class DeleteAppProfileRequest(proto.Message):
class UpdateAppProfileMetadata(proto.Message):
- r"""The metadata for the Operation returned by UpdateAppProfile. """
+ r"""The metadata for the Operation returned by UpdateAppProfile.
+ """
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
index 1d93991ad..b8ff4e60e 100644
--- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
+++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
@@ -62,6 +62,9 @@ class RestoreTableRequest(proto.Message):
r"""The request for
[RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
parent (str):
Required. The name of the instance in which to create the
@@ -77,6 +80,8 @@ class RestoreTableRequest(proto.Message):
Name of the backup from which to restore. Values are of the
form
``projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>``.
+
+ This field is a member of `oneof`_ ``source``.
"""
parent = proto.Field(proto.STRING, number=1,)
@@ -88,6 +93,9 @@ class RestoreTableMetadata(proto.Message):
r"""Metadata type for the long-running operation returned by
[RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
name (str):
Name of the table being created and restored
@@ -96,6 +104,7 @@ class RestoreTableMetadata(proto.Message):
The type of the restore source.
backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo):
+ This field is a member of `oneof`_ ``source_info``.
optimize_table_operation_name (str):
If exists, the name of the long-running operation that will
be used to track the post-restore optimization process to
@@ -183,6 +192,7 @@ class CreateTableRequest(proto.Message):
class Split(proto.Message):
r"""An initial split point for a newly created table.
+
Attributes:
key (bytes):
Row key to use as an initial tablet boundary.
@@ -231,6 +241,13 @@ class DropRowRangeRequest(proto.Message):
r"""Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange]
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
name (str):
Required. The unique name of the table on which to drop a
@@ -239,9 +256,13 @@ class DropRowRangeRequest(proto.Message):
row_key_prefix (bytes):
Delete all rows that start with this row key
prefix. Prefix cannot be zero length.
+
+ This field is a member of `oneof`_ ``target``.
delete_all_data_from_table (bool):
Delete all rows in the table. Setting this to
false is a no-op.
+
+ This field is a member of `oneof`_ ``target``.
"""
name = proto.Field(proto.STRING, number=1,)
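The documented ``target`` oneof behaves as below; a sketch with a placeholder table name:

```python
from google.cloud.bigtable_admin_v2 import types

req = types.DropRowRangeRequest(
    name="projects/p/instances/i/tables/t",
    row_key_prefix=b"user#",
)
# Setting the other oneof member clears row_key_prefix automatically.
req.delete_all_data_from_table = True
assert not req.row_key_prefix
```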
@@ -357,6 +378,14 @@ class ModifyColumnFamiliesRequest(proto.Message):
class Modification(proto.Message):
r"""A create, update, or delete of a particular column family.
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
id (str):
The ID of the column family to be modified.
@@ -364,13 +393,19 @@ class Modification(proto.Message):
Create a new column family with the specified
schema, or fail if one already exists with the
given ID.
+
+ This field is a member of `oneof`_ ``mod``.
update (google.cloud.bigtable_admin_v2.types.ColumnFamily):
Update an existing column family to the
specified schema, or fail if no column family
exists with the given ID.
+
+ This field is a member of `oneof`_ ``mod``.
drop (bool):
Drop (delete) the column family with the
given ID, or fail if no such family exists.
+
+ This field is a member of `oneof`_ ``mod``.
"""
id = proto.Field(proto.STRING, number=1,)
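Likewise for the ``mod`` oneof: each Modification carries exactly one of create/update/drop. A sketch reusing a table admin client, with placeholder IDs:

```python
from google.cloud.bigtable_admin_v2 import types

mods = [
    types.ModifyColumnFamiliesRequest.Modification(
        id="cf1", create=types.ColumnFamily()
    ),
    types.ModifyColumnFamiliesRequest.Modification(id="old_cf", drop=True),
]
table = client.modify_column_families(
    name="projects/p/instances/i/tables/t", modifications=mods
)
```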
@@ -591,9 +626,9 @@ class SnapshotTableMetadata(proto.Message):
r"""The metadata for the Operation returned by SnapshotTable.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to most Cloud
- Bigtable customers. This feature might be changed in backward-
- incompatible ways and is not recommended for production use. It
- is not subject to any SLA or deprecation policy.
+ Bigtable customers. This feature might be changed in
+ backward-incompatible ways and is not recommended for production
+ use. It is not subject to any SLA or deprecation policy.
Attributes:
original_request (google.cloud.bigtable_admin_v2.types.SnapshotTableRequest):
@@ -621,9 +656,9 @@ class CreateTableFromSnapshotMetadata(proto.Message):
CreateTableFromSnapshot.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to most Cloud
- Bigtable customers. This feature might be changed in backward-
- incompatible ways and is not recommended for production use. It
- is not subject to any SLA or deprecation policy.
+ Bigtable customers. This feature might be changed in
+ backward-incompatible ways and is not recommended for production
+ use. It is not subject to any SLA or deprecation policy.
Attributes:
original_request (google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest):
diff --git a/google/cloud/bigtable_admin_v2/types/instance.py b/google/cloud/bigtable_admin_v2/types/instance.py
index f1ba750e1..206cb40c4 100644
--- a/google/cloud/bigtable_admin_v2/types/instance.py
+++ b/google/cloud/bigtable_admin_v2/types/instance.py
@@ -16,10 +16,18 @@
import proto # type: ignore
from google.cloud.bigtable_admin_v2.types import common
+from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
- package="google.bigtable.admin.v2", manifest={"Instance", "Cluster", "AppProfile",},
+ package="google.bigtable.admin.v2",
+ manifest={
+ "Instance",
+ "AutoscalingTargets",
+ "AutoscalingLimits",
+ "Cluster",
+ "AppProfile",
+ },
)
@@ -57,6 +65,11 @@ class Instance(proto.Message):
- No more than 64 labels can be associated with a given
resource.
- Keys and values must both be under 128 bytes.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. A server-assigned timestamp representing when
+ this Instance was created. For instances created before this
+ field was added (August 2021), this value is
+ ``seconds: 0, nanos: 1``.
"""
class State(proto.Enum):
@@ -76,6 +89,39 @@ class Type(proto.Enum):
state = proto.Field(proto.ENUM, number=3, enum=State,)
type_ = proto.Field(proto.ENUM, number=4, enum=Type,)
labels = proto.MapField(proto.STRING, proto.STRING, number=5,)
+ create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
+
+
+class AutoscalingTargets(proto.Message):
+ r"""The Autoscaling targets for a Cluster. These determine the
+ recommended nodes.
+
+ Attributes:
+ cpu_utilization_percent (int):
+ The CPU utilization that the Autoscaler
+ should be trying to achieve. This number is on a
+ scale from 0 (no utilization) to 100 (total
+ utilization).
+ """
+
+ cpu_utilization_percent = proto.Field(proto.INT32, number=2,)
+
+
+class AutoscalingLimits(proto.Message):
+ r"""Limits for the number of nodes a Cluster can autoscale
+ up/down to.
+
+ Attributes:
+ min_serve_nodes (int):
+ Required. Minimum number of nodes to scale
+ down to.
+ max_serve_nodes (int):
+ Required. Maximum number of nodes to scale up
+ to.
+ """
+
+ min_serve_nodes = proto.Field(proto.INT32, number=1,)
+ max_serve_nodes = proto.Field(proto.INT32, number=2,)
class Cluster(proto.Message):
@@ -83,6 +129,9 @@ class Cluster(proto.Message):
of serving all [Tables][google.bigtable.admin.v2.Table] in the
parent [Instance][google.bigtable.admin.v2.Instance].
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
name (str):
The unique name of the cluster. Values are of the form
@@ -96,9 +145,13 @@ class Cluster(proto.Message):
state (google.cloud.bigtable_admin_v2.types.Cluster.State):
The current state of the cluster.
serve_nodes (int):
- Required. The number of nodes allocated to
- this cluster. More nodes enable higher
- throughput and more consistent performance.
+ The number of nodes allocated to this
+ cluster. More nodes enable higher throughput and
+ more consistent performance.
+ cluster_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterConfig):
+ Configuration for this cluster.
+
+ This field is a member of `oneof`_ ``config``.
default_storage_type (google.cloud.bigtable_admin_v2.types.StorageType):
(``CreationOnly``) The type of storage used by this cluster
to serve its parent instance's tables, unless explicitly
@@ -116,9 +169,40 @@ class State(proto.Enum):
RESIZING = 3
DISABLED = 4
+ class ClusterAutoscalingConfig(proto.Message):
+ r"""Autoscaling config for a cluster.
+
+ Attributes:
+ autoscaling_limits (google.cloud.bigtable_admin_v2.types.AutoscalingLimits):
+ Required. Autoscaling limits for this
+ cluster.
+ autoscaling_targets (google.cloud.bigtable_admin_v2.types.AutoscalingTargets):
+ Required. Autoscaling targets for this
+ cluster.
+ """
+
+ autoscaling_limits = proto.Field(
+ proto.MESSAGE, number=1, message="AutoscalingLimits",
+ )
+ autoscaling_targets = proto.Field(
+ proto.MESSAGE, number=2, message="AutoscalingTargets",
+ )
+
+ class ClusterConfig(proto.Message):
+ r"""Configuration for a cluster.
+
+ Attributes:
+ cluster_autoscaling_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterAutoscalingConfig):
+ Autoscaling configuration for this cluster.
+ """
+
+ cluster_autoscaling_config = proto.Field(
+ proto.MESSAGE, number=1, message="Cluster.ClusterAutoscalingConfig",
+ )
+
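A sketch wiring the new autoscaling messages together; the limits and target values are illustrative:

```python
from google.cloud.bigtable_admin_v2 import types

cluster = types.Cluster(
    name="projects/p/instances/i/clusters/c",
    # cluster_config is a member of the ``config`` oneof.
    cluster_config=types.Cluster.ClusterConfig(
        cluster_autoscaling_config=types.Cluster.ClusterAutoscalingConfig(
            autoscaling_limits=types.AutoscalingLimits(
                min_serve_nodes=1, max_serve_nodes=10
            ),
            autoscaling_targets=types.AutoscalingTargets(
                cpu_utilization_percent=60
            ),
        )
    ),
)
```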
class EncryptionConfig(proto.Message):
- r"""Cloud Key Management Service (Cloud KMS) settings for a CMEK-
- rotected cluster.
+ r"""Cloud Key Management Service (Cloud KMS) settings for a
+ CMEK-protected cluster.
Attributes:
kms_key_name (str):
@@ -142,6 +226,9 @@ class EncryptionConfig(proto.Message):
location = proto.Field(proto.STRING, number=2,)
state = proto.Field(proto.ENUM, number=3, enum=State,)
serve_nodes = proto.Field(proto.INT32, number=4,)
+ cluster_config = proto.Field(
+ proto.MESSAGE, number=7, oneof="config", message=ClusterConfig,
+ )
default_storage_type = proto.Field(proto.ENUM, number=5, enum=common.StorageType,)
encryption_config = proto.Field(proto.MESSAGE, number=6, message=EncryptionConfig,)
@@ -150,6 +237,13 @@ class AppProfile(proto.Message):
r"""A configuration object describing how Cloud Bigtable should
treat traffic from a particular end user application.
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
name (str):
(``OutputOnly``) The unique name of the app profile. Values
@@ -171,8 +265,12 @@ class AppProfile(proto.Message):
case for this AppProfile.
multi_cluster_routing_use_any (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny):
Use a multi-cluster routing policy.
+
+ This field is a member of `oneof`_ ``routing_policy``.
single_cluster_routing (google.cloud.bigtable_admin_v2.types.AppProfile.SingleClusterRouting):
Use a single-cluster routing policy.
+
+ This field is a member of `oneof`_ ``routing_policy``.
"""
class MultiClusterRoutingUseAny(proto.Message):
diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py
index 75ceaf263..7ced1216c 100644
--- a/google/cloud/bigtable_admin_v2/types/table.py
+++ b/google/cloud/bigtable_admin_v2/types/table.py
@@ -44,12 +44,17 @@ class RestoreSourceType(proto.Enum):
class RestoreInfo(proto.Message):
r"""Information about a table restore.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType):
The type of the restore source.
backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo):
Information about the backup used to restore
the table. The backup may no longer exist.
+
+ This field is a member of `oneof`_ ``source_info``.
"""
source_type = proto.Field(proto.ENUM, number=1, enum="RestoreSourceType",)
@@ -111,6 +116,7 @@ class View(proto.Enum):
class ClusterState(proto.Message):
r"""The state of a table's data in a particular cluster.
+
Attributes:
replication_state (google.cloud.bigtable_admin_v2.types.Table.ClusterState.ReplicationState):
Output only. The state of replication for the
@@ -174,25 +180,41 @@ class GcRule(proto.Message):
r"""Rule for determining which cells to delete during garbage
collection.
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
max_num_versions (int):
Delete all cells in a column except the most
recent N.
+
+ This field is a member of `oneof`_ ``rule``.
max_age (google.protobuf.duration_pb2.Duration):
Delete cells in a column older than the given
age. Values must be at least one millisecond,
and will be truncated to microsecond
granularity.
+
+ This field is a member of `oneof`_ ``rule``.
intersection (google.cloud.bigtable_admin_v2.types.GcRule.Intersection):
Delete cells that would be deleted by every
nested rule.
+
+ This field is a member of `oneof`_ ``rule``.
union (google.cloud.bigtable_admin_v2.types.GcRule.Union):
Delete cells that would be deleted by any
nested rule.
+
+ This field is a member of `oneof`_ ``rule``.
"""
class Intersection(proto.Message):
r"""A GcRule which deletes cells matching all of the given rules.
+
Attributes:
rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]):
Only delete cells which would be deleted by every element of
@@ -203,6 +225,7 @@ class Intersection(proto.Message):
class Union(proto.Message):
r"""A GcRule which deletes cells matching any of the given rules.
+
Attributes:
rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]):
Delete cells which would be deleted by any element of
@@ -259,9 +282,9 @@ class Snapshot(proto.Message):
new table.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to most Cloud
- Bigtable customers. This feature might be changed in backward-
- incompatible ways and is not recommended for production use. It
- is not subject to any SLA or deprecation policy.
+ Bigtable customers. This feature might be changed in
+ backward-incompatible ways and is not recommended for production
+ use. It is not subject to any SLA or deprecation policy.
Attributes:
name (str):
@@ -310,6 +333,7 @@ class State(proto.Enum):
class Backup(proto.Message):
r"""A backup of a Cloud Bigtable table.
+
Attributes:
name (str):
Output only. A globally unique identifier for the backup
@@ -369,6 +393,7 @@ class State(proto.Enum):
class BackupInfo(proto.Message):
r"""Information about a backup.
+
Attributes:
backup (str):
Output only. Name of the backup.
diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py
index 9aa15e391..9db7ac1cb 100644
--- a/google/cloud/bigtable_v2/services/bigtable/async_client.py
+++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py
@@ -16,16 +16,30 @@
from collections import OrderedDict
import functools
import re
-from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union
+from typing import (
+ Dict,
+ Optional,
+ AsyncIterable,
+ Awaitable,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
import pkg_resources
-import google.api_core.client_options as ClientOptions # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core.client_options import ClientOptions
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
+
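The try/except keeps the `OptionalRetry` alias importable across google-api-core versions: newer releases expose the `_MethodDefault` sentinel type, older ones fall back to `object`. Call sites are unaffected; a custom retry still passes through, e.g. (sync client and table name assumed):

```python
from google.api_core import retry as retries

rows = client.read_rows(
    request={"table_name": "projects/p/instances/i/tables/t"},
    retry=retries.Retry(deadline=60.0),
)
```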
from google.cloud.bigtable_v2.types import bigtable
from google.cloud.bigtable_v2.types import data
from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO
@@ -95,6 +109,42 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[ClientOptions] = None
+ ):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ return BigtableClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
+
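The async variant simply delegates to the sync classmethod; a hedged usage sketch:

```python
import os
from google.api_core.client_options import ClientOptions
from google.cloud.bigtable_v2 import BigtableAsyncClient

os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "auto"
endpoint, cert_source = BigtableAsyncClient.get_mtls_endpoint_and_cert_source(
    ClientOptions()
)
# endpoint is the mTLS endpoint only if a client cert source was found.
```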
@property
def transport(self) -> BigtableTransport:
"""Returns the transport used by the client instance.
@@ -157,11 +207,11 @@ def __init__(
def read_rows(
self,
- request: bigtable.ReadRowsRequest = None,
+ request: Union[bigtable.ReadRowsRequest, dict] = None,
*,
table_name: str = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]:
@@ -173,7 +223,7 @@ def read_rows(
ReadRowsResponse documentation for details.
Args:
- request (:class:`google.cloud.bigtable_v2.types.ReadRowsRequest`):
+ request (Union[google.cloud.bigtable_v2.types.ReadRowsRequest, dict]):
The request object. Request message for
Bigtable.ReadRows.
table_name (:class:`str`):
@@ -206,7 +256,7 @@ def read_rows(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, app_profile_id])
if request is not None and has_flattened_params:
@@ -255,11 +305,11 @@ def read_rows(
def sample_row_keys(
self,
- request: bigtable.SampleRowKeysRequest = None,
+ request: Union[bigtable.SampleRowKeysRequest, dict] = None,
*,
table_name: str = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]:
@@ -270,7 +320,7 @@ def sample_row_keys(
mapreduces.
Args:
- request (:class:`google.cloud.bigtable_v2.types.SampleRowKeysRequest`):
+ request (Union[google.cloud.bigtable_v2.types.SampleRowKeysRequest, dict]):
The request object. Request message for
Bigtable.SampleRowKeys.
table_name (:class:`str`):
@@ -303,7 +353,7 @@ def sample_row_keys(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, app_profile_id])
if request is not None and has_flattened_params:
@@ -352,13 +402,13 @@ def sample_row_keys(
async def mutate_row(
self,
- request: bigtable.MutateRowRequest = None,
+ request: Union[bigtable.MutateRowRequest, dict] = None,
*,
table_name: str = None,
row_key: bytes = None,
mutations: Sequence[data.Mutation] = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable.MutateRowResponse:
@@ -366,7 +416,7 @@ async def mutate_row(
left unchanged unless explicitly changed by ``mutation``.
Args:
- request (:class:`google.cloud.bigtable_v2.types.MutateRowRequest`):
+ request (Union[google.cloud.bigtable_v2.types.MutateRowRequest, dict]):
The request object. Request message for
Bigtable.MutateRow.
table_name (:class:`str`):
@@ -417,7 +467,7 @@ async def mutate_row(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, row_key, mutations, app_profile_id])
if request is not None and has_flattened_params:
@@ -473,12 +523,12 @@ async def mutate_row(
def mutate_rows(
self,
- request: bigtable.MutateRowsRequest = None,
+ request: Union[bigtable.MutateRowsRequest, dict] = None,
*,
table_name: str = None,
entries: Sequence[bigtable.MutateRowsRequest.Entry] = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]:
@@ -487,7 +537,7 @@ def mutate_rows(
batch is not executed atomically.
Args:
- request (:class:`google.cloud.bigtable_v2.types.MutateRowsRequest`):
+ request (Union[google.cloud.bigtable_v2.types.MutateRowsRequest, dict]):
The request object. Request message for
BigtableService.MutateRows.
table_name (:class:`str`):
@@ -534,7 +584,7 @@ def mutate_rows(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, entries, app_profile_id])
if request is not None and has_flattened_params:
@@ -585,7 +635,7 @@ def mutate_rows(
async def check_and_mutate_row(
self,
- request: bigtable.CheckAndMutateRowRequest = None,
+ request: Union[bigtable.CheckAndMutateRowRequest, dict] = None,
*,
table_name: str = None,
row_key: bytes = None,
@@ -593,7 +643,7 @@ async def check_and_mutate_row(
true_mutations: Sequence[data.Mutation] = None,
false_mutations: Sequence[data.Mutation] = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable.CheckAndMutateRowResponse:
@@ -601,7 +651,7 @@ async def check_and_mutate_row(
predicate Reader filter.
Args:
- request (:class:`google.cloud.bigtable_v2.types.CheckAndMutateRowRequest`):
+ request (Union[google.cloud.bigtable_v2.types.CheckAndMutateRowRequest, dict]):
The request object. Request message for
Bigtable.CheckAndMutateRow.
table_name (:class:`str`):
@@ -675,7 +725,7 @@ async def check_and_mutate_row(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[
@@ -741,26 +791,26 @@ async def check_and_mutate_row(
async def read_modify_write_row(
self,
- request: bigtable.ReadModifyWriteRowRequest = None,
+ request: Union[bigtable.ReadModifyWriteRowRequest, dict] = None,
*,
table_name: str = None,
row_key: bytes = None,
rules: Sequence[data.ReadModifyWriteRule] = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable.ReadModifyWriteRowResponse:
r"""Modifies a row atomically on the server. The method
reads the latest existing timestamp and value from the
- specified columns and writes a new entry based on pre-
- defined read/modify/write rules. The new value for the
- timestamp is the greater of the existing timestamp or
- the current server time. The method returns the new
+ specified columns and writes a new entry based on
+ pre-defined read/modify/write rules. The new value for
+ the timestamp is the greater of the existing timestamp
+ or the current server time. The method returns the new
contents of all modified cells.
Args:
- request (:class:`google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest`):
+ request (Union[google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest, dict]):
The request object. Request message for
Bigtable.ReadModifyWriteRow.
table_name (:class:`str`):
@@ -813,7 +863,7 @@ async def read_modify_write_row(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, row_key, rules, app_profile_id])
if request is not None and has_flattened_params:
@@ -864,6 +914,12 @@ async def read_modify_write_row(
# Done; return the response.
return response
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
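
The new `__aenter__`/`__aexit__` pair turns the async client into an async context manager that closes its transport on exit. A minimal usage sketch, assuming default application credentials; the table path is a placeholder and the `BigtableAsyncClient` import follows the generated layout:

import asyncio

from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient

async def main():
    # The transport is closed automatically when the block exits.
    async with BigtableAsyncClient() as client:
        samples = await client.sample_row_keys(
            request={"table_name": "projects/p/instances/i/tables/t"}
        )
        async for sample in samples:
            print(sample.row_key)

asyncio.run(main())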
diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py
index 32dd6739c..90a753606 100644
--- a/google/cloud/bigtable_v2/services/bigtable/client.py
+++ b/google/cloud/bigtable_v2/services/bigtable/client.py
@@ -14,22 +14,26 @@
# limitations under the License.
#
from collections import OrderedDict
-from distutils import util
import os
import re
from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union
import pkg_resources
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
+
from google.cloud.bigtable_v2.types import bigtable
from google.cloud.bigtable_v2.types import data
from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO
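
Two recurring changes in this file work together: `request` parameters now accept plain dicts, and `retry` is annotated with the `OptionalRetry` alias so both the `DEFAULT` sentinel and an explicit policy type-check. A hedged sketch with a placeholder table path, assuming default credentials:

from google.api_core import retry as retries
from google.cloud.bigtable_v2.services.bigtable.client import BigtableClient

client = BigtableClient()
stream = client.read_rows(
    # Plain dicts are coerced into a ReadRowsRequest.
    request={"table_name": "projects/p/instances/i/tables/t", "rows_limit": 10},
    # An explicit policy also satisfies the OptionalRetry annotation.
    retry=retries.Retry(deadline=60.0),
)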
@@ -230,6 +234,73 @@ def parse_common_location_path(path: str) -> Dict[str, str]:
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
+ ):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if the `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+ """
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_client_cert not in ("true", "false"):
+ raise ValueError(
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+ if use_mtls_endpoint not in ("auto", "never", "always"):
+ raise MutualTLSChannelError(
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Figure out the client cert source to use.
+ client_cert_source = None
+ if use_client_cert == "true":
+ if client_options.client_cert_source:
+ client_cert_source = client_options.client_cert_source
+ elif mtls.has_default_client_cert_source():
+ client_cert_source = mtls.default_client_cert_source()
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ elif use_mtls_endpoint == "always" or (
+ use_mtls_endpoint == "auto" and client_cert_source
+ ):
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = cls.DEFAULT_ENDPOINT
+
+ return api_endpoint, client_cert_source
+
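
A quick sketch of how the new classmethod resolves the endpoint; the environment value is illustrative, and everything else comes straight from the method body above:

import os

from google.api_core import client_options as client_options_lib
from google.cloud.bigtable_v2.services.bigtable.client import BigtableClient

os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"
endpoint, cert_source = BigtableClient.get_mtls_endpoint_and_cert_source(
    client_options_lib.ClientOptions()
)
assert endpoint == BigtableClient.DEFAULT_ENDPOINT
# cert_source is None here: GOOGLE_API_USE_CLIENT_CERTIFICATE defaults to "false".
assert cert_source is None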
def __init__(
self,
*,
@@ -280,50 +351,22 @@ def __init__(
if client_options is None:
client_options = client_options_lib.ClientOptions()
- # Create SSL credentials for mutual TLS if needed.
- use_client_cert = bool(
- util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
+ client_options
)
- client_cert_source_func = None
- is_mtls = False
- if use_client_cert:
- if client_options.client_cert_source:
- is_mtls = True
- client_cert_source_func = client_options.client_cert_source
- else:
- is_mtls = mtls.has_default_client_cert_source()
- if is_mtls:
- client_cert_source_func = mtls.default_client_cert_source()
- else:
- client_cert_source_func = None
-
- # Figure out which api endpoint to use.
- if client_options.api_endpoint is not None:
- api_endpoint = client_options.api_endpoint
- else:
- use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
- if use_mtls_env == "never":
- api_endpoint = self.DEFAULT_ENDPOINT
- elif use_mtls_env == "always":
- api_endpoint = self.DEFAULT_MTLS_ENDPOINT
- elif use_mtls_env == "auto":
- if is_mtls:
- api_endpoint = self.DEFAULT_MTLS_ENDPOINT
- else:
- api_endpoint = self.DEFAULT_ENDPOINT
- else:
- raise MutualTLSChannelError(
- "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
- "values: never, auto, always"
- )
+ api_key_value = getattr(client_options, "api_key", None)
+ if api_key_value and credentials:
+ raise ValueError(
+ "client_options.api_key and credentials are mutually exclusive"
+ )
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, BigtableTransport):
# transport is a BigtableTransport instance.
- if credentials or client_options.credentials_file:
+ if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
@@ -335,6 +378,15 @@ def __init__(
)
self._transport = transport
else:
+ import google.auth._default # type: ignore
+
+ if api_key_value and hasattr(
+ google.auth._default, "get_api_key_credentials"
+ ):
+ credentials = google.auth._default.get_api_key_credentials(
+ api_key_value
+ )
+
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
@@ -344,10 +396,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
- always_use_jwt_access=(
- Transport == type(self).get_transport_class("grpc")
- or Transport == type(self).get_transport_class("grpc_asyncio")
- ),
+ always_use_jwt_access=True,
)
def read_rows(
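
The constructor now also routes API keys from `client_options` into credentials when the installed google-auth exposes `get_api_key_credentials`. A hedged sketch; "my-api-key" is a placeholder, and a google-api-core version whose `ClientOptions` accepts `api_key` is assumed:

from google.api_core.client_options import ClientOptions
from google.cloud.bigtable_v2.services.bigtable.client import BigtableClient

# api_key and explicit credentials are mutually exclusive (ValueError otherwise).
client = BigtableClient(client_options=ClientOptions(api_key="my-api-key"))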
@@ -356,7 +405,7 @@ def read_rows(
*,
table_name: str = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[bigtable.ReadRowsResponse]:
@@ -401,7 +450,7 @@ def read_rows(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, app_profile_id])
if request is not None and has_flattened_params:
@@ -447,7 +496,7 @@ def sample_row_keys(
*,
table_name: str = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[bigtable.SampleRowKeysResponse]:
@@ -491,7 +540,7 @@ def sample_row_keys(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, app_profile_id])
if request is not None and has_flattened_params:
@@ -539,7 +588,7 @@ def mutate_row(
row_key: bytes = None,
mutations: Sequence[data.Mutation] = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable.MutateRowResponse:
@@ -598,7 +647,7 @@ def mutate_row(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, row_key, mutations, app_profile_id])
if request is not None and has_flattened_params:
@@ -649,7 +698,7 @@ def mutate_rows(
table_name: str = None,
entries: Sequence[bigtable.MutateRowsRequest.Entry] = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[bigtable.MutateRowsResponse]:
@@ -705,7 +754,7 @@ def mutate_rows(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, entries, app_profile_id])
if request is not None and has_flattened_params:
@@ -757,7 +806,7 @@ def check_and_mutate_row(
true_mutations: Sequence[data.Mutation] = None,
false_mutations: Sequence[data.Mutation] = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable.CheckAndMutateRowResponse:
@@ -839,7 +888,7 @@ def check_and_mutate_row(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[
@@ -904,16 +953,16 @@ def read_modify_write_row(
row_key: bytes = None,
rules: Sequence[data.ReadModifyWriteRule] = None,
app_profile_id: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable.ReadModifyWriteRowResponse:
r"""Modifies a row atomically on the server. The method
reads the latest existing timestamp and value from the
- specified columns and writes a new entry based on pre-
- defined read/modify/write rules. The new value for the
- timestamp is the greater of the existing timestamp or
- the current server time. The method returns the new
+ specified columns and writes a new entry based on
+ pre-defined read/modify/write rules. The new value for
+ the timestamp is the greater of the existing timestamp
+ or the current server time. The method returns the new
contents of all modified cells.
Args:
@@ -970,7 +1019,7 @@ def read_modify_write_row(
"""
# Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
+ # Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_name, row_key, rules, app_profile_id])
if request is not None and has_flattened_params:
@@ -1014,6 +1063,19 @@ def read_modify_write_row(
# Done; return the response.
return response
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
+ """
+ self.transport.close()
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
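
The synchronous client gains the matching context-manager protocol; per the docstring warning, use it only when the transport is not shared with other clients. A minimal sketch with a placeholder table path:

from google.cloud.bigtable_v2.services.bigtable.client import BigtableClient

# Exiting the block closes the underlying transport.
with BigtableClient() as client:
    for response in client.sample_row_keys(
        request={"table_name": "projects/p/instances/i/tables/t"}
    ):
        print(response.row_key)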
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/google/cloud/bigtable_v2/services/bigtable/transports/base.py
index a6dbca220..bb727d67e 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/base.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/base.py
@@ -15,14 +15,13 @@
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
-import packaging.version
import pkg_resources
import google.auth # type: ignore
-import google.api_core # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
@@ -35,15 +34,6 @@
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
-try:
- # google.auth.__version__ was added in 1.26.0
- _GOOGLE_AUTH_VERSION = google.auth.__version__
-except AttributeError:
- try: # try pkg_resources if it is available
- _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
- except pkg_resources.DistributionNotFound: # pragma: NO COVER
- _GOOGLE_AUTH_VERSION = None
-
class BigtableTransport(abc.ABC):
"""Abstract transport class for Bigtable."""
@@ -100,7 +90,7 @@ def __init__(
host += ":443"
self._host = host
- scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+ scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
@@ -116,7 +106,6 @@ def __init__(
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
-
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
@@ -133,29 +122,6 @@ def __init__(
# Save the credentials.
self._credentials = credentials
- # TODO(busunkim): This method is in the base transport
- # to avoid duplicating code across the transport classes. These functions
- # should be deleted once the minimum required versions of google-auth is increased.
-
- # TODO: Remove this function once google-auth >= 1.25.0 is required
- @classmethod
- def _get_scopes_kwargs(
- cls, host: str, scopes: Optional[Sequence[str]]
- ) -> Dict[str, Optional[Sequence[str]]]:
- """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
- scopes_kwargs = {}
-
- if _GOOGLE_AUTH_VERSION and (
- packaging.version.parse(_GOOGLE_AUTH_VERSION)
- >= packaging.version.parse("1.25.0")
- ):
- scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
- else:
- scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
- return scopes_kwargs
-
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
@@ -236,6 +202,15 @@ def _prep_wrapped_messages(self, client_info):
),
}
+ def close(self):
+ """Closes resources associated with the transport.
+
+ .. warning::
+ Only call this method if the transport is NOT shared
+ with other clients - this may cause errors in other clients!
+ """
+ raise NotImplementedError()
+
@property
def read_rows(
self,
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
index 2df844a9c..78b2215ff 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
@@ -16,8 +16,8 @@
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import grpc_helpers # type: ignore
-from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers
+from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
@@ -159,8 +159,11 @@ def __init__(
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
+ # use the credentials which are saved
credentials=self._credentials,
- credentials_file=credentials_file,
+ # Set ``credentials_file`` to ``None`` here as
+ # the credentials that we saved earlier should be used.
+ credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
@@ -381,10 +384,10 @@ def read_modify_write_row(
Modifies a row atomically on the server. The method
reads the latest existing timestamp and value from the
- specified columns and writes a new entry based on pre-
- defined read/modify/write rules. The new value for the
- timestamp is the greater of the existing timestamp or
- the current server time. The method returns the new
+ specified columns and writes a new entry based on
+ pre-defined read/modify/write rules. The new value for
+ the timestamp is the greater of the existing timestamp
+ or the current server time. The method returns the new
contents of all modified cells.
Returns:
@@ -405,5 +408,8 @@ def read_modify_write_row(
)
return self._stubs["read_modify_write_row"]
+ def close(self):
+ self.grpc_channel.close()
+
__all__ = ("BigtableGrpcTransport",)
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
index 56bf684bd..aa3b80f13 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
@@ -16,11 +16,10 @@
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
-import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
@@ -205,8 +204,11 @@ def __init__(
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
+ # use the credentials which are saved
credentials=self._credentials,
- credentials_file=credentials_file,
+ # Set ``credentials_file`` to ``None`` here as
+ # the credentials that we saved earlier should be used.
+ credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
@@ -388,10 +390,10 @@ def read_modify_write_row(
Modifies a row atomically on the server. The method
reads the latest existing timestamp and value from the
- specified columns and writes a new entry based on pre-
- defined read/modify/write rules. The new value for the
- timestamp is the greater of the existing timestamp or
- the current server time. The method returns the new
+ specified columns and writes a new entry based on
+ pre-defined read/modify/write rules. The new value for
+ the timestamp is the greater of the existing timestamp
+ or the current server time. The method returns the new
contents of all modified cells.
Returns:
@@ -412,5 +414,8 @@ def read_modify_write_row(
)
return self._stubs["read_modify_write_row"]
+ def close(self):
+ return self.grpc_channel.close()
+
__all__ = ("BigtableGrpcAsyncIOTransport",)
diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py
index 35a19e2d1..956eeca5c 100644
--- a/google/cloud/bigtable_v2/types/bigtable.py
+++ b/google/cloud/bigtable_v2/types/bigtable.py
@@ -41,6 +41,7 @@
class ReadRowsRequest(proto.Message):
r"""Request message for Bigtable.ReadRows.
+
Attributes:
table_name (str):
Required. The unique name of the table from which to read.
@@ -51,14 +52,15 @@ class ReadRowsRequest(proto.Message):
If not specified, the "default" application
profile will be used.
rows (google.cloud.bigtable_v2.types.RowSet):
- The row keys and/or ranges to read. If not
- specified, reads from all rows.
+ The row keys and/or ranges to read
+ sequentially. If not specified, reads from all
+ rows.
filter (google.cloud.bigtable_v2.types.RowFilter):
The filter to apply to the contents of the
specified row(s). If unset, reads the entirety
of each row.
rows_limit (int):
- The read will terminate after committing to N
+ The read will stop after committing to N
rows' worth of results. The default (zero) is to
return all results.
"""
@@ -72,6 +74,7 @@ class ReadRowsRequest(proto.Message):
class ReadRowsResponse(proto.Message):
r"""Response message for Bigtable.ReadRows.
+
Attributes:
chunks (Sequence[google.cloud.bigtable_v2.types.ReadRowsResponse.CellChunk]):
A collection of a row's contents as part of
@@ -93,6 +96,13 @@ class CellChunk(proto.Message):
r"""Specifies a piece of a row's contents returned as part of the
read response stream.
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
row_key (bytes):
The row key for this chunk of data. If the
@@ -144,9 +154,13 @@ class CellChunk(proto.Message):
reset_row (bool):
Indicates that the client should drop all previous chunks
for ``row_key``, as it will be re-read from the beginning.
+
+ This field is a member of `oneof`_ ``row_status``.
commit_row (bool):
Indicates that the client can safely process all previous
chunks for ``row_key``, as its data has been fully read.
+
+ This field is a member of `oneof`_ ``row_status``.
"""
row_key = proto.Field(proto.BYTES, number=1,)
@@ -169,6 +183,7 @@ class CellChunk(proto.Message):
class SampleRowKeysRequest(proto.Message):
r"""Request message for Bigtable.SampleRowKeys.
+
Attributes:
table_name (str):
Required. The unique name of the table from which to sample
@@ -186,6 +201,7 @@ class SampleRowKeysRequest(proto.Message):
class SampleRowKeysResponse(proto.Message):
r"""Response message for Bigtable.SampleRowKeys.
+
Attributes:
row_key (bytes):
Sorted streamed sequence of sample row keys
@@ -213,6 +229,7 @@ class SampleRowKeysResponse(proto.Message):
class MutateRowRequest(proto.Message):
r"""Request message for Bigtable.MutateRow.
+
Attributes:
table_name (str):
Required. The unique name of the table to which the mutation
@@ -240,11 +257,13 @@ class MutateRowRequest(proto.Message):
class MutateRowResponse(proto.Message):
- r"""Response message for Bigtable.MutateRow. """
+ r"""Response message for Bigtable.MutateRow.
+ """
class MutateRowsRequest(proto.Message):
r"""Request message for BigtableService.MutateRows.
+
Attributes:
table_name (str):
Required. The unique name of the table to
@@ -265,6 +284,7 @@ class MutateRowsRequest(proto.Message):
class Entry(proto.Message):
r"""A mutation for a given row.
+
Attributes:
row_key (bytes):
The key of the row to which the ``mutations`` should be
@@ -287,6 +307,7 @@ class Entry(proto.Message):
class MutateRowsResponse(proto.Message):
r"""Response message for BigtableService.MutateRows.
+
Attributes:
entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]):
One or more results for Entries from the
@@ -317,6 +338,7 @@ class Entry(proto.Message):
class CheckAndMutateRowRequest(proto.Message):
r"""Request message for Bigtable.CheckAndMutateRow.
+
Attributes:
table_name (str):
Required. The unique name of the table to which the
@@ -366,6 +388,7 @@ class CheckAndMutateRowRequest(proto.Message):
class CheckAndMutateRowResponse(proto.Message):
r"""Response message for Bigtable.CheckAndMutateRow.
+
Attributes:
predicate_matched (bool):
Whether or not the request's ``predicate_filter`` yielded
@@ -377,6 +400,7 @@ class CheckAndMutateRowResponse(proto.Message):
class ReadModifyWriteRowRequest(proto.Message):
r"""Request message for Bigtable.ReadModifyWriteRow.
+
Attributes:
table_name (str):
Required. The unique name of the table to which the
@@ -408,6 +432,7 @@ class ReadModifyWriteRowRequest(proto.Message):
class ReadModifyWriteRowResponse(proto.Message):
r"""Response message for Bigtable.ReadModifyWriteRow.
+
Attributes:
row (google.cloud.bigtable_v2.types.Row):
A Row containing the new contents of all
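
The documented request fields compose as usual with proto-plus messages; a small sketch using placeholder resource names:

from google.cloud.bigtable_v2.types import bigtable, data

request = bigtable.ReadRowsRequest(
    table_name="projects/p/instances/i/tables/t",
    rows=data.RowSet(row_keys=[b"phone#4c410523#20190501"]),
    rows_limit=10,  # the read stops after committing 10 rows' worth of results
)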
diff --git a/google/cloud/bigtable_v2/types/data.py b/google/cloud/bigtable_v2/types/data.py
index ca2302889..7cd74b047 100644
--- a/google/cloud/bigtable_v2/types/data.py
+++ b/google/cloud/bigtable_v2/types/data.py
@@ -129,19 +129,35 @@ class Cell(proto.Message):
class RowRange(proto.Message):
r"""Specifies a contiguous range of rows.
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
start_key_closed (bytes):
Used when giving an inclusive lower bound for
the range.
+
+ This field is a member of `oneof`_ ``start_key``.
start_key_open (bytes):
Used when giving an exclusive lower bound for
the range.
+
+ This field is a member of `oneof`_ ``start_key``.
end_key_open (bytes):
Used when giving an exclusive upper bound for
the range.
+
+ This field is a member of `oneof`_ ``end_key``.
end_key_closed (bytes):
Used when giving an inclusive upper bound for
the range.
+
+ This field is a member of `oneof`_ ``end_key``.
"""
start_key_closed = proto.Field(proto.BYTES, number=1, oneof="start_key",)
@@ -152,6 +168,7 @@ class RowRange(proto.Message):
class RowSet(proto.Message):
r"""Specifies a non-contiguous set of rows.
+
Attributes:
row_keys (Sequence[bytes]):
Single rows included in the set.
@@ -169,6 +186,13 @@ class ColumnRange(proto.Message):
<column_family>:<end_qualifier>, where both bounds can be either
inclusive or exclusive.
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
family_name (str):
The name of the column family within which
@@ -176,15 +200,23 @@ class ColumnRange(proto.Message):
start_qualifier_closed (bytes):
Used when giving an inclusive lower bound for
the range.
+
+ This field is a member of `oneof`_ ``start_qualifier``.
start_qualifier_open (bytes):
Used when giving an exclusive lower bound for
the range.
+
+ This field is a member of `oneof`_ ``start_qualifier``.
end_qualifier_closed (bytes):
Used when giving an inclusive upper bound for
the range.
+
+ This field is a member of `oneof`_ ``end_qualifier``.
end_qualifier_open (bytes):
Used when giving an exclusive upper bound for
the range.
+
+ This field is a member of `oneof`_ ``end_qualifier``.
"""
family_name = proto.Field(proto.STRING, number=1,)
@@ -198,6 +230,7 @@ class ColumnRange(proto.Message):
class TimestampRange(proto.Message):
r"""Specified a contiguous range of microsecond timestamps.
+
Attributes:
start_timestamp_micros (int):
Inclusive lower bound. If left empty,
@@ -213,19 +246,35 @@ class TimestampRange(proto.Message):
class ValueRange(proto.Message):
r"""Specifies a contiguous range of raw byte values.
+
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
start_value_closed (bytes):
Used when giving an inclusive lower bound for
the range.
+
+ This field is a member of `oneof`_ ``start_value``.
start_value_open (bytes):
Used when giving an exclusive lower bound for
the range.
+
+ This field is a member of `oneof`_ ``start_value``.
end_value_closed (bytes):
Used when giving an inclusive upper bound for
the range.
+
+ This field is a member of `oneof`_ ``end_value``.
end_value_open (bytes):
Used when giving an exclusive upper bound for
the range.
+
+ This field is a member of `oneof`_ ``end_value``.
"""
start_value_closed = proto.Field(proto.BYTES, number=1, oneof="start_value",)
@@ -270,20 +319,33 @@ class RowFilter(proto.Message):
RowFilter.Chain and RowFilter.Interleave documentation.
The total serialized size of a RowFilter message must not exceed
- 4096 bytes, and RowFilters may not be nested within each other (in
+ 20480 bytes, and RowFilters may not be nested within each other (in
Chains or Interleaves) to a depth of more than 20.
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
chain (google.cloud.bigtable_v2.types.RowFilter.Chain):
Applies several RowFilters to the data in
sequence, progressively narrowing the results.
+
+ This field is a member of `oneof`_ ``filter``.
interleave (google.cloud.bigtable_v2.types.RowFilter.Interleave):
Applies several RowFilters to the data in
parallel and combines the results.
+
+ This field is a member of `oneof`_ ``filter``.
condition (google.cloud.bigtable_v2.types.RowFilter.Condition):
Applies one of two possible RowFilters to the
data based on the output of a predicate
RowFilter.
+
+ This field is a member of `oneof`_ ``filter``.
sink (bool):
ADVANCED USE ONLY. Hook for introspection into the
RowFilter. Outputs all cells directly to the output of the
@@ -350,14 +412,20 @@ class RowFilter(proto.Message):
Cannot be used within the ``predicate_filter``,
``true_filter``, or ``false_filter`` of a
[Condition][google.bigtable.v2.RowFilter.Condition].
+
+ This field is a member of `oneof`_ ``filter``.
pass_all_filter (bool):
Matches all cells, regardless of input. Functionally
equivalent to leaving ``filter`` unset, but included for
completeness.
+
+ This field is a member of `oneof`_ ``filter``.
block_all_filter (bool):
Does not match any cells, regardless of
input. Useful for temporarily disabling just
part of a filter.
+
+ This field is a member of `oneof`_ ``filter``.
row_key_regex_filter (bytes):
Matches only cells from rows whose keys satisfy the given
RE2 regex. In other words, passes through the entire row
@@ -366,10 +434,14 @@ class RowFilter(proto.Message):
``\C`` escape sequence must be used if a true wildcard is
desired. The ``.`` character will not match the new line
character ``\n``, which may be present in a binary key.
+
+ This field is a member of `oneof`_ ``filter``.
row_sample_filter (float):
Matches all cells from a row with probability
p, and matches no cells from the row with
probability 1-p.
+
+ This field is a member of `oneof`_ ``filter``.
family_name_regex_filter (str):
Matches only cells from columns whose families satisfy the
given RE2 regex. For technical reasons, the regex must not
@@ -377,6 +449,8 @@ class RowFilter(proto.Message):
a literal. Note that, since column families cannot contain
the new line character ``\n``, it is sufficient to use ``.``
as a full wildcard when matching column family names.
+
+ This field is a member of `oneof`_ ``filter``.
column_qualifier_regex_filter (bytes):
Matches only cells from columns whose qualifiers satisfy the
given RE2 regex. Note that, since column qualifiers can
@@ -384,12 +458,18 @@ class RowFilter(proto.Message):
used if a true wildcard is desired. The ``.`` character will
not match the new line character ``\n``, which may be
present in a binary qualifier.
+
+ This field is a member of `oneof`_ ``filter``.
column_range_filter (google.cloud.bigtable_v2.types.ColumnRange):
Matches only cells from columns within the
given range.
+
+ This field is a member of `oneof`_ ``filter``.
timestamp_range_filter (google.cloud.bigtable_v2.types.TimestampRange):
Matches only cells with timestamps within the
given range.
+
+ This field is a member of `oneof`_ ``filter``.
value_regex_filter (bytes):
Matches only cells with values that satisfy the given
regular expression. Note that, since cell values can contain
@@ -397,20 +477,28 @@ class RowFilter(proto.Message):
a true wildcard is desired. The ``.`` character will not
match the new line character ``\n``, which may be present in
a binary value.
+
+ This field is a member of `oneof`_ ``filter``.
value_range_filter (google.cloud.bigtable_v2.types.ValueRange):
Matches only cells with values that fall
within the given range.
+
+ This field is a member of `oneof`_ ``filter``.
cells_per_row_offset_filter (int):
Skips the first N cells of each row, matching
all subsequent cells. If duplicate cells are
present, as is possible when using an
Interleave, each copy of the cell is counted
separately.
+
+ This field is a member of `oneof`_ ``filter``.
cells_per_row_limit_filter (int):
Matches only the first N cells of each row.
If duplicate cells are present, as is possible
when using an Interleave, each copy of the cell
is counted separately.
+
+ This field is a member of `oneof`_ ``filter``.
cells_per_column_limit_filter (int):
Matches only the most recent N cells within each column. For
example, if N=2, this filter would match column ``foo:bar``
@@ -419,9 +507,13 @@ class RowFilter(proto.Message):
``foo:bar2``. If duplicate cells are present, as is possible
when using an Interleave, each copy of the cell is counted
separately.
+
+ This field is a member of `oneof`_ ``filter``.
strip_value_transformer (bool):
Replaces each cell's value with the empty
string.
+
+ This field is a member of `oneof`_ ``filter``.
apply_label_transformer (str):
Applies the given label to all cells in the output row. This
allows the client to determine which results were produced
@@ -437,6 +529,8 @@ class RowFilter(proto.Message):
contain multiple ``apply_label_transformers``, as they will
be applied to separate copies of the input. This may be
relaxed in the future.
+
+ This field is a member of `oneof`_ ``filter``.
"""
class Chain(proto.Message):
@@ -554,19 +648,35 @@ class Mutation(proto.Message):
r"""Specifies a particular change to be made to the contents of a
row.
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
set_cell (google.cloud.bigtable_v2.types.Mutation.SetCell):
Set a cell's value.
+
+ This field is a member of `oneof`_ ``mutation``.
delete_from_column (google.cloud.bigtable_v2.types.Mutation.DeleteFromColumn):
Deletes cells from a column.
+
+ This field is a member of `oneof`_ ``mutation``.
delete_from_family (google.cloud.bigtable_v2.types.Mutation.DeleteFromFamily):
Deletes cells from a column family.
+
+ This field is a member of `oneof`_ ``mutation``.
delete_from_row (google.cloud.bigtable_v2.types.Mutation.DeleteFromRow):
Deletes cells from the entire row.
+
+ This field is a member of `oneof`_ ``mutation``.
"""
class SetCell(proto.Message):
r"""A Mutation which sets the value of the specified cell.
+
Attributes:
family_name (str):
The name of the family into which new data should be
@@ -627,7 +737,8 @@ class DeleteFromFamily(proto.Message):
family_name = proto.Field(proto.STRING, number=1,)
class DeleteFromRow(proto.Message):
- r"""A Mutation which deletes all cells from the containing row. """
+ r"""A Mutation which deletes all cells from the containing row.
+ """
set_cell = proto.Field(proto.MESSAGE, number=1, oneof="mutation", message=SetCell,)
delete_from_column = proto.Field(
@@ -645,6 +756,13 @@ class ReadModifyWriteRule(proto.Message):
r"""Specifies an atomic read/modify/write operation on the latest
value of the specified column.
+ This message has `oneof`_ fields (mutually exclusive fields).
+ For each oneof, at most one member field can be set at the same time.
+ Setting any member of the oneof automatically clears all other
+ members.
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
Attributes:
family_name (str):
The name of the family to which the read/modify/write should
@@ -658,12 +776,16 @@ class ReadModifyWriteRule(proto.Message):
Rule specifying that ``append_value`` be appended to the
existing value. If the targeted cell is unset, it will be
treated as containing the empty string.
+
+ This field is a member of `oneof`_ ``rule``.
increment_amount (int):
Rule specifying that ``increment_amount`` be added to the
existing value. If the targeted cell is unset, it will be
treated as containing a zero. Otherwise, the targeted cell
must contain an 8-byte value (interpreted as a 64-bit
big-endian signed integer), or the entire request will fail.
+
+ This field is a member of `oneof`_ ``rule``.
"""
family_name = proto.Field(proto.STRING, number=1,)
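
The oneof semantics documented throughout this file are enforced at runtime by proto-plus: setting one member clears the others. A quick sketch with RowFilter's ``filter`` oneof:

from google.cloud.bigtable_v2.types import data

f = data.RowFilter(pass_all_filter=True)
f.block_all_filter = True  # setting another member of the ``filter`` oneof...
assert not f.pass_all_filter  # ...automatically clears the previously set one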
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 000000000..f12ed46fc
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,28 @@
+[mypy]
+python_version = 3.6
+namespace_packages = True
+exclude = tests/unit/gapic/
+
+[mypy-grpc.*]
+ignore_missing_imports = True
+
+[mypy-google.auth.*]
+ignore_missing_imports = True
+
+[mypy-google.iam.*]
+ignore_missing_imports = True
+
+[mypy-google.longrunning.*]
+ignore_missing_imports = True
+
+[mypy-google.oauth2.*]
+ignore_missing_imports = True
+
+[mypy-google.rpc.*]
+ignore_missing_imports = True
+
+[mypy-proto.*]
+ignore_missing_imports = True
+
+[mypy-pytest]
+ignore_missing_imports = True
diff --git a/noxfile.py b/noxfile.py
index d938c5d2b..6ae044f00 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -29,7 +29,7 @@
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
-UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
+UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
@@ -38,6 +38,7 @@
"unit",
"system_emulated",
"system",
+ "mypy",
"cover",
"lint",
"lint_setup_py",
@@ -72,6 +73,16 @@ def blacken(session):
)
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def mypy(session):
+ """Verify type hints are mypy compatible."""
+ session.install("-e", ".")
+ session.install("mypy", "types-setuptools", "types-protobuf", "types-mock")
+ session.install("google-cloud-testutils")
+ # TODO: also verify types on tests, all of google package
+ session.run("mypy", "google/", "tests/")
+
+
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
@@ -102,7 +113,7 @@ def default(session):
"py.test",
"--quiet",
f"--junitxml=unit_{session.python}_sponge_log.xml",
- "--cov=google/cloud",
+ "--cov=google",
"--cov=tests/unit",
"--cov-append",
"--cov-config=.coveragerc",
@@ -119,7 +130,7 @@ def unit(session):
default(session)
-@nox.session(python="3.8")
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system_emulated(session):
import subprocess
import signal
@@ -133,15 +144,17 @@ def system_emulated(session):
subprocess.call(["gcloud", "components", "install", "beta", "bigtable"])
hostport = "localhost:8789"
+ session.env["BIGTABLE_EMULATOR_HOST"] = hostport
+
p = subprocess.Popen(
["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport]
)
- session.env["BIGTABLE_EMULATOR_HOST"] = hostport
- system(session)
-
- # Stop Emulator
- os.killpg(os.getpgid(p.pid), signal.SIGTERM)
+ try:
+ system(session)
+ finally:
+ # Stop Emulator
+ os.killpg(os.getpgid(p.pid), signal.SIGKILL)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
diff --git a/owlbot.py b/owlbot.py
index 081c12574..a5e1b09de 100644
--- a/owlbot.py
+++ b/owlbot.py
@@ -15,6 +15,7 @@
"""This script is used to synthesize generated parts of this library."""
from pathlib import Path
+import re
from typing import List, Optional
import synthtool as s
@@ -85,6 +86,7 @@ def get_staging_dirs(
# ----------------------------------------------------------------------------
templated_files = common.py_library(
samples=True, # set to True only if there are samples
+ unit_test_python_versions=["3.6", "3.7", "3.8", "3.9", "3.10"],
split_system_tests=True,
microgenerator=True,
cov_level=100,
@@ -104,7 +106,7 @@ def place_before(path, text, *before_text, escape=None):
s.replace([path], text, replacement)
system_emulated_session = """
-@nox.session(python="3.8")
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system_emulated(session):
import subprocess
import signal
@@ -118,15 +120,17 @@ def system_emulated(session):
subprocess.call(["gcloud", "components", "install", "beta", "bigtable"])
hostport = "localhost:8789"
+ session.env["BIGTABLE_EMULATOR_HOST"] = hostport
+
p = subprocess.Popen(
["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport]
)
- session.env["BIGTABLE_EMULATOR_HOST"] = hostport
- system(session)
-
- # Stop Emulator
- os.killpg(os.getpgid(p.pid), signal.SIGTERM)
+ try:
+ system(session)
+ finally:
+ # Stop Emulator
+ os.killpg(os.getpgid(p.pid), signal.SIGKILL)
"""
@@ -146,28 +150,82 @@ def system_emulated(session):
"""nox.options.sessions = [
"unit",
"system_emulated",
- "system",""",
+ "system",
+ "mypy",""",
)
+s.replace(
+ "noxfile.py",
+ """\
+@nox.session\(python=DEFAULT_PYTHON_VERSION\)
+def lint_setup_py\(session\):
+""",
+ '''\
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def mypy(session):
+ """Verify type hints are mypy compatible."""
+ session.install("-e", ".")
+ session.install("mypy", "types-setuptools", "types-protobuf", "types-mock")
+ session.install("google-cloud-testutils")
+ # TODO: also verify types on tests, all of google package
+ session.run("mypy", "google/", "tests/")
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def lint_setup_py(session):
+''',
+)
+
+# Work around https://github.com/googleapis/gapic-generator-python/issues/689
+bad_clusters_typing = r"""
+ clusters: Sequence\[
+ bigtable_instance_admin\.CreateInstanceRequest\.ClustersEntry
+ \] = None,"""
+
+good_clusters_typing = """
+ clusters: Dict[str, gba_instance.Cluster] = None,"""
+
+s.replace(
+ "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/*client.py",
+ bad_clusters_typing,
+ good_clusters_typing,
+)
+
+bad_clusters_docstring_1 = re.escape(r"""
+ clusters (:class:`Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]`):""")
+
+bad_clusters_docstring_2 = re.escape(r"""
+ clusters (Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]):""")
+
+good_clusters_docstring = """
+ clusters (Dict[str, gba_instance.Cluster]):"""
+
+s.replace(
+ "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/*client.py",
+ bad_clusters_docstring_1,
+ good_clusters_docstring,
+)
+
+s.replace(
+ "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/*client.py",
+ bad_clusters_docstring_2,
+ good_clusters_docstring,
+)
+
# ----------------------------------------------------------------------------
# Samples templates
# ----------------------------------------------------------------------------
-sample_files = common.py_samples(samples=True)
-for path in sample_files:
- s.move(path)
+python.py_samples(skip_readmes=True)
-# Note: python-docs-samples is not yet using 'main':
-#s.replace(
-# "samples/**/*.md",
-# r"python-docs-samples/blob/master/",
-# "python-docs-samples/blob/main/",
-#)
s.replace(
- "samples/**/*.md",
- r"google-cloud-python/blob/master/",
- "google-cloud-python/blob/main/",
-)
+ "samples/beam/noxfile.py",
+ """INSTALL_LIBRARY_FROM_SOURCE \= os.environ.get\("INSTALL_LIBRARY_FROM_SOURCE", False\) in \(
+ "True",
+ "true",
+\)""",
+ """# todo(kolea2): temporary workaround to install pinned dep version
+INSTALL_LIBRARY_FROM_SOURCE = False""")
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
diff --git a/samples/AUTHORING_GUIDE.md b/samples/AUTHORING_GUIDE.md
index 55c97b32f..8249522ff 100644
--- a/samples/AUTHORING_GUIDE.md
+++ b/samples/AUTHORING_GUIDE.md
@@ -1 +1 @@
-See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/AUTHORING_GUIDE.md
\ No newline at end of file
+See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/AUTHORING_GUIDE.md
\ No newline at end of file
diff --git a/samples/CONTRIBUTING.md b/samples/CONTRIBUTING.md
index 34c882b6f..f5fe2e6ba 100644
--- a/samples/CONTRIBUTING.md
+++ b/samples/CONTRIBUTING.md
@@ -1 +1 @@
-See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/CONTRIBUTING.md
\ No newline at end of file
+See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/CONTRIBUTING.md
\ No newline at end of file
diff --git a/samples/beam/hello_world_write.py b/samples/beam/hello_world_write.py
index 894edc46f..89f541d0d 100644
--- a/samples/beam/hello_world_write.py
+++ b/samples/beam/hello_world_write.py
@@ -23,28 +23,29 @@ class BigtableOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument(
- '--bigtable-project',
- help='The Bigtable project ID, this can be different than your '
- 'Dataflow project',
- default='bigtable-project')
+ "--bigtable-project",
+ help="The Bigtable project ID, this can be different than your "
+ "Dataflow project",
+ default="bigtable-project",
+ )
parser.add_argument(
- '--bigtable-instance',
- help='The Bigtable instance ID',
- default='bigtable-instance')
+ "--bigtable-instance",
+ help="The Bigtable instance ID",
+ default="bigtable-instance",
+ )
parser.add_argument(
- '--bigtable-table',
- help='The Bigtable table ID in the instance.',
- default='bigtable-table')
+ "--bigtable-table",
+ help="The Bigtable table ID in the instance.",
+ default="bigtable-table",
+ )
class CreateRowFn(beam.DoFn):
def process(self, key):
direct_row = row.DirectRow(row_key=key)
direct_row.set_cell(
- "stats_summary",
- b"os_build",
- b"android",
- datetime.datetime.now())
+ "stats_summary", b"os_build", b"android", datetime.datetime.now()
+ )
return [direct_row]
@@ -52,13 +53,14 @@ def run(argv=None):
"""Build and run the pipeline."""
options = BigtableOptions(argv)
with beam.Pipeline(options=options) as p:
- p | beam.Create(["phone#4c410523#20190501",
- "phone#4c410523#20190502"]) | beam.ParDo(
- CreateRowFn()) | WriteToBigTable(
+ p | beam.Create(
+ ["phone#4c410523#20190501", "phone#4c410523#20190502"]
+ ) | beam.ParDo(CreateRowFn()) | WriteToBigTable(
project_id=options.bigtable_project,
instance_id=options.bigtable_instance,
- table_id=options.bigtable_table)
+ table_id=options.bigtable_table,
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
run()
diff --git a/samples/beam/hello_world_write_test.py b/samples/beam/hello_world_write_test.py
index cdbecc661..4e9a47c7d 100644
--- a/samples/beam/hello_world_write_test.py
+++ b/samples/beam/hello_world_write_test.py
@@ -19,9 +19,9 @@
import hello_world_write
-PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
-BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE']
-TABLE_ID_PREFIX = 'mobile-time-series-{}'
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"]
+TABLE_ID_PREFIX = "mobile-time-series-{}"
@pytest.fixture(scope="module", autouse=True)
@@ -34,17 +34,20 @@ def table_id():
if table.exists():
table.delete()
- table.create(column_families={'stats_summary': None})
+ table.create(column_families={"stats_summary": None})
yield table_id
table.delete()
def test_hello_world_write(table_id):
- hello_world_write.run([
- '--bigtable-project=%s' % PROJECT,
- '--bigtable-instance=%s' % BIGTABLE_INSTANCE,
- '--bigtable-table=%s' % table_id])
+ hello_world_write.run(
+ [
+ "--bigtable-project=%s" % PROJECT,
+ "--bigtable-instance=%s" % BIGTABLE_INSTANCE,
+ "--bigtable-table=%s" % table_id,
+ ]
+ )
client = bigtable.Client(project=PROJECT, admin=True)
instance = client.instance(BIGTABLE_INSTANCE)
diff --git a/samples/beam/noxfile.py b/samples/beam/noxfile.py
index 171bee657..5b10d2811 100644
--- a/samples/beam/noxfile.py
+++ b/samples/beam/noxfile.py
@@ -14,9 +14,11 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -27,8 +29,9 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
-# Copy `noxfile_config.py` to your directory and modify it instead.
+BLACK_VERSION = "black==19.10b0"
+# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
@@ -37,24 +40,29 @@
TEST_CONFIG = {
# You can opt out from the test for specific Python versions.
- 'ignored_versions': ["2.7"],
-
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ "enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
- 'envs': {},
+ "envs": {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
- sys.path.append('.')
+ sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
@@ -64,37 +72,41 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
- env_key = TEST_CONFIG['gcloud_project_env']
+ env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
- ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
- ret.update(TEST_CONFIG['envs'])
+ ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
-# All versions used to tested samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+# All versions used to test samples.
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
# todo(kolea2): temporary workaround to install pinned dep version
INSTALL_LIBRARY_FROM_SOURCE = False
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
#
# Style Checks
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
This is used when running the linter to ensure that import order is
@@ -132,60 +144,95 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
- "."
+ ".",
]
session.run("flake8", *args)
#
-# Sample Tests
+# Black
#
-PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
-def _session_tests(session, post_install=None):
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
- if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+#
+# Sample Tests
+#
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
- if post_install:
- post_install(session)
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars()
- )
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ test_list.extend(glob.glob("tests"))
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
- session.skip("SKIPPED: {} tests are disabled for this sample.".format(
- session.python
- ))
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
#
@@ -193,7 +240,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -202,6 +249,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
@@ -211,7 +263,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
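The _get_repo_root hunk repeated in each sample noxfile adds a second root marker: when .git is absent, as in Cloud Build checkouts, a setup.py at the library root is accepted instead. A minimal standalone sketch of that upward walk, using a hypothetical find_repo_root name:

# Sketch of the upward walk in _get_repo_root above; find_repo_root is a
# hypothetical name, not part of the generated noxfile.
import os
from pathlib import Path
from typing import Optional


def find_repo_root(start: Optional[str] = None, max_depth: int = 10) -> str:
    p = Path(start or os.getcwd())
    for _ in range(max_depth):
        if (p / ".git").exists():  # a normal clone
            return str(p)
        # Cloud Build checkouts have no .git, but setup.py always sits in
        # the library root, so accept it as a second marker.
        if (p / "setup.py").exists():
            return str(p)
        p = p.parent  # Path("/").parent == Path("/"), so the loop is bounded
    raise Exception("Unable to detect repository root.")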
diff --git a/samples/beam/noxfile_config.py b/samples/beam/noxfile_config.py
new file mode 100644
index 000000000..eb01435a0
--- /dev/null
+++ b/samples/beam/noxfile_config.py
@@ -0,0 +1,45 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default TEST_CONFIG_OVERRIDE for Python repos.
+
+# You can copy this file into your directory; it will then be imported by
+# noxfile.py.
+
+# The source of truth:
+# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py
+
+TEST_CONFIG_OVERRIDE = {
+    # You can opt out of testing on specific Python versions.
+ "ignored_versions": [
+ "2.7", # not supported
+ "3.10", # Beam wheels not yet released for Python 3.10
+ ],
+    # Old samples are opted out of enforcing Python type hints.
+    # All new samples should feature them.
+    "enforce_type_hints": False,
+    # An envvar key for determining the project ID to use. Change it
+    # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in to using a
+    # build-specific Cloud project. You can also supply your own string
+    # to use your own Cloud project.
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+ # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
+ # A dictionary you want to inject into your test. Don't put any
+ # secrets here. These values will override predefined values.
+ "envs": {},
+}
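This override file carries no logic of its own: each sample's noxfile.py appends "." to sys.path, imports TEST_CONFIG_OVERRIDE, and shallow-merges it over its defaults with dict.update. A small sketch of that merge behavior, with illustrative literals rather than the real defaults:

# Sketch of the shallow merge noxfile.py performs after importing this
# file; the literals are illustrative, not the real defaults.
DEFAULTS = {
    "ignored_versions": [],
    "enforce_type_hints": False,
    "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
    "pip_version_override": None,
    "envs": {},
}
OVERRIDE = {"ignored_versions": ["2.7", "3.10"]}

config = dict(DEFAULTS)
config.update(OVERRIDE)  # top-level keys are replaced wholesale; no deep merge
assert config["ignored_versions"] == ["2.7", "3.10"]
assert config["gcloud_project_env"] == "GOOGLE_CLOUD_PROJECT"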
diff --git a/samples/beam/requirements-test.txt b/samples/beam/requirements-test.txt
index 95ea1e6a0..4a46ff600 100644
--- a/samples/beam/requirements-test.txt
+++ b/samples/beam/requirements-test.txt
@@ -1 +1 @@
-pytest==6.2.4
+pytest==7.0.0
diff --git a/samples/beam/requirements.txt b/samples/beam/requirements.txt
index 29731a5f9..094b6ef91 100644
--- a/samples/beam/requirements.txt
+++ b/samples/beam/requirements.txt
@@ -1,3 +1,3 @@
-apache-beam==2.31.0
-google-cloud-bigtable<2.0.0
-google-cloud-core==1.7.2
\ No newline at end of file
+apache-beam==2.35.0
+google-cloud-bigtable<2.5.0
+google-cloud-core==2.2.2
diff --git a/samples/hello/main.py b/samples/hello/main.py
index 073270847..7b2b1764a 100644
--- a/samples/hello/main.py
+++ b/samples/hello/main.py
@@ -25,12 +25,14 @@
"""
import argparse
+
# [START bigtable_hw_imports]
import datetime
from google.cloud import bigtable
from google.cloud.bigtable import column_family
from google.cloud.bigtable import row_filters
+
# [END bigtable_hw_imports]
@@ -43,14 +45,14 @@ def main(project_id, instance_id, table_id):
# [END bigtable_hw_connect]
# [START bigtable_hw_create_table]
- print('Creating the {} table.'.format(table_id))
+ print("Creating the {} table.".format(table_id))
table = instance.table(table_id)
- print('Creating column family cf1 with Max Version GC rule...')
+ print("Creating column family cf1 with Max Version GC rule...")
    # Create a column family with GC policy: most recent N versions
# Define the GC policy to retain only the most recent 2 versions
max_versions_rule = column_family.MaxVersionsGCRule(2)
- column_family_id = 'cf1'
+ column_family_id = "cf1"
column_families = {column_family_id: max_versions_rule}
if not table.exists():
table.create(column_families=column_families)
@@ -59,10 +61,10 @@ def main(project_id, instance_id, table_id):
# [END bigtable_hw_create_table]
# [START bigtable_hw_write_rows]
- print('Writing some greetings to the table.')
- greetings = ['Hello World!', 'Hello Cloud Bigtable!', 'Hello Python!']
+ print("Writing some greetings to the table.")
+ greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"]
rows = []
- column = 'greeting'.encode()
+ column = "greeting".encode()
for i, value in enumerate(greetings):
# Note: This example uses sequential numeric IDs for simplicity,
# but this can result in poor performance in a production
@@ -74,12 +76,11 @@ def main(project_id, instance_id, table_id):
# the best performance, see the documentation:
#
# https://cloud.google.com/bigtable/docs/schema-design
- row_key = 'greeting{}'.format(i).encode()
+ row_key = "greeting{}".format(i).encode()
row = table.direct_row(row_key)
- row.set_cell(column_family_id,
- column,
- value,
- timestamp=datetime.datetime.utcnow())
+ row.set_cell(
+ column_family_id, column, value, timestamp=datetime.datetime.utcnow()
+ )
rows.append(row)
table.mutate_rows(rows)
# [END bigtable_hw_write_rows]
@@ -91,40 +92,40 @@ def main(project_id, instance_id, table_id):
# [END bigtable_hw_create_filter]
# [START bigtable_hw_get_with_filter]
- print('Getting a single greeting by row key.')
- key = 'greeting0'.encode()
+ print("Getting a single greeting by row key.")
+ key = "greeting0".encode()
row = table.read_row(key, row_filter)
cell = row.cells[column_family_id][column][0]
- print(cell.value.decode('utf-8'))
+ print(cell.value.decode("utf-8"))
# [END bigtable_hw_get_with_filter]
# [START bigtable_hw_scan_with_filter]
- print('Scanning for all greetings:')
+ print("Scanning for all greetings:")
partial_rows = table.read_rows(filter_=row_filter)
for row in partial_rows:
cell = row.cells[column_family_id][column][0]
- print(cell.value.decode('utf-8'))
+ print(cell.value.decode("utf-8"))
# [END bigtable_hw_scan_with_filter]
# [START bigtable_hw_delete_table]
- print('Deleting the {} table.'.format(table_id))
+ print("Deleting the {} table.".format(table_id))
table.delete()
# [END bigtable_hw_delete_table]
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = argparse.ArgumentParser(
- description=__doc__,
- formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('project_id', help='Your Cloud Platform project ID.')
+ description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+ parser.add_argument("project_id", help="Your Cloud Platform project ID.")
parser.add_argument(
- 'instance_id', help='ID of the Cloud Bigtable instance to connect to.')
+ "instance_id", help="ID of the Cloud Bigtable instance to connect to."
+ )
parser.add_argument(
- '--table',
- help='Table to create and destroy.',
- default='Hello-Bigtable')
+ "--table", help="Table to create and destroy.", default="Hello-Bigtable"
+ )
args = parser.parse_args()
main(args.project_id, args.instance_id, args.table)
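The schema-design comment in the hunk above notes that sequential keys such as greeting0, greeting1, ... can funnel writes onto a single tablet. One simple mitigation, sketched here with a hypothetical make_row_key helper that is not part of the sample, is to salt each key with a short hash prefix; the trade-off is that efficient range scans over the original ordering are lost.

# Hypothetical helper, not part of samples/hello/main.py: salt the key
# with a short content hash so consecutive writes land on different
# tablets.
import hashlib


def make_row_key(base: str) -> bytes:
    prefix = hashlib.md5(base.encode("utf-8")).hexdigest()[:4]
    return "{}-{}".format(prefix, base).encode("utf-8")


# 'greeting0' and 'greeting1' now hash to unrelated prefixes.
print(make_row_key("greeting0"), make_row_key("greeting1"))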
diff --git a/samples/hello/main_test.py b/samples/hello/main_test.py
index 49b8098fc..641b34d11 100644
--- a/samples/hello/main_test.py
+++ b/samples/hello/main_test.py
@@ -17,23 +17,22 @@
from main import main
-PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
-BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE']
-TABLE_NAME_FORMAT = 'hello-world-test-{}'
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"]
+TABLE_NAME_FORMAT = "hello-world-test-{}"
TABLE_NAME_RANGE = 10000
def test_main(capsys):
- table_name = TABLE_NAME_FORMAT.format(
- random.randrange(TABLE_NAME_RANGE))
+ table_name = TABLE_NAME_FORMAT.format(random.randrange(TABLE_NAME_RANGE))
main(PROJECT, BIGTABLE_INSTANCE, table_name)
out, _ = capsys.readouterr()
- assert 'Creating the {} table.'.format(table_name) in out
- assert 'Writing some greetings to the table.' in out
- assert 'Getting a single greeting by row key.' in out
- assert 'Hello World!' in out
- assert 'Scanning for all greetings' in out
- assert 'Hello Cloud Bigtable!' in out
- assert 'Deleting the {} table.'.format(table_name) in out
+ assert "Creating the {} table.".format(table_name) in out
+ assert "Writing some greetings to the table." in out
+ assert "Getting a single greeting by row key." in out
+ assert "Hello World!" in out
+ assert "Scanning for all greetings" in out
+ assert "Hello Cloud Bigtable!" in out
+ assert "Deleting the {} table.".format(table_name) in out
diff --git a/samples/hello/noxfile.py b/samples/hello/noxfile.py
index ba55d7ce5..20cdfc620 100644
--- a/samples/hello/noxfile.py
+++ b/samples/hello/noxfile.py
@@ -14,9 +14,11 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -27,8 +29,9 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
-# Copy `noxfile_config.py` to your directory and modify it instead.
+BLACK_VERSION = "black==19.10b0"
+# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
@@ -37,24 +40,29 @@
TEST_CONFIG = {
    # You can opt out of testing on specific Python versions.
- 'ignored_versions': ["2.7"],
-
+ "ignored_versions": [],
+    # Old samples are opted out of enforcing Python type hints.
+    # All new samples should feature them.
+    "enforce_type_hints": False,
    # An envvar key for determining the project ID to use. Change it
    # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in to using a
    # build-specific Cloud project. You can also supply your own string
    # to use your own Cloud project.
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
- 'envs': {},
+ "envs": {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
- sys.path.append('.')
+ sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
@@ -64,36 +72,43 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
- env_key = TEST_CONFIG['gcloud_project_env']
+ env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
- ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
- ret.update(TEST_CONFIG['envs'])
+ ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
-# All versions used to tested samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+# All versions used to test samples.
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
-INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a Python version is missing
+nox.options.error_on_missing_interpreters = True
+
#
# Style Checks
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
    This is used when running the linter to ensure that import order is
@@ -131,60 +146,95 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
- "."
+ ".",
]
session.run("flake8", *args)
#
-# Sample Tests
+# Black
#
-PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
-def _session_tests(session, post_install=None):
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
- if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+#
+# Sample Tests
+#
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
- if post_install:
- post_install(session)
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars()
- )
+
+def _session_tests(
+    session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+    """Runs py.test for a particular project."""
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ test_list.extend(glob.glob("tests"))
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
+            # on Travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
- session.skip("SKIPPED: {} tests are disabled for this sample.".format(
- session.python
- ))
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
#
@@ -192,7 +242,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -201,6 +251,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
@@ -210,7 +265,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
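One behavioral fix in this noxfile is easy to miss amid the quote-style churn: bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) treated any non-empty string, including "False", as true, whereas the new code requires the literal strings "True" or "true". A quick sketch of the difference, using a plain dict to stand in for os.environ:

# Sketch of the truthiness bug the diff fixes; a plain dict stands in
# for os.environ.
env = {"INSTALL_LIBRARY_FROM_SOURCE": "False"}

old = bool(env.get("INSTALL_LIBRARY_FROM_SOURCE", False))
new = env.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true")

assert old is True   # any non-empty string, even "False", was truthy
assert new is False  # only the explicit strings "True"/"true" opt in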
diff --git a/samples/hello/requirements-test.txt b/samples/hello/requirements-test.txt
index 95ea1e6a0..4a46ff600 100644
--- a/samples/hello/requirements-test.txt
+++ b/samples/hello/requirements-test.txt
@@ -1 +1 @@
-pytest==6.2.4
+pytest==7.0.0
diff --git a/samples/hello/requirements.txt b/samples/hello/requirements.txt
index 20e1f5078..f62375fdb 100644
--- a/samples/hello/requirements.txt
+++ b/samples/hello/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.3.1
-google-cloud-core==1.7.2
+google-cloud-bigtable==2.4.0
+google-cloud-core==2.2.2
diff --git a/samples/hello_happybase/main.py b/samples/hello_happybase/main.py
index ade4acbf0..7999fd006 100644
--- a/samples/hello_happybase/main.py
+++ b/samples/hello_happybase/main.py
@@ -29,6 +29,7 @@
# [START bigtable_hw_imports_happybase]
from google.cloud import bigtable
from google.cloud import happybase
+
# [END bigtable_hw_imports_happybase]
@@ -43,23 +44,21 @@ def main(project_id, instance_id, table_name):
try:
# [START bigtable_hw_create_table_happybase]
- print('Creating the {} table.'.format(table_name))
- column_family_name = 'cf1'
+ print("Creating the {} table.".format(table_name))
+ column_family_name = "cf1"
connection.create_table(
- table_name,
- {
- column_family_name: dict() # Use default options.
- })
+ table_name, {column_family_name: dict()} # Use default options.
+ )
# [END bigtable_hw_create_table_happybase]
# [START bigtable_hw_write_rows_happybase]
- print('Writing some greetings to the table.')
+ print("Writing some greetings to the table.")
table = connection.table(table_name)
- column_name = '{fam}:greeting'.format(fam=column_family_name)
+ column_name = "{fam}:greeting".format(fam=column_family_name)
greetings = [
- 'Hello World!',
- 'Hello Cloud Bigtable!',
- 'Hello HappyBase!',
+ "Hello World!",
+ "Hello Cloud Bigtable!",
+ "Hello HappyBase!",
]
for i, value in enumerate(greetings):
@@ -73,28 +72,26 @@ def main(project_id, instance_id, table_name):
# the best performance, see the documentation:
#
# https://cloud.google.com/bigtable/docs/schema-design
- row_key = 'greeting{}'.format(i)
- table.put(
- row_key, {column_name.encode('utf-8'): value.encode('utf-8')}
- )
+ row_key = "greeting{}".format(i)
+ table.put(row_key, {column_name.encode("utf-8"): value.encode("utf-8")})
# [END bigtable_hw_write_rows_happybase]
# [START bigtable_hw_get_by_key_happybase]
- print('Getting a single greeting by row key.')
- key = 'greeting0'.encode('utf-8')
+ print("Getting a single greeting by row key.")
+ key = "greeting0".encode("utf-8")
row = table.row(key)
- print('\t{}: {}'.format(key, row[column_name.encode('utf-8')]))
+ print("\t{}: {}".format(key, row[column_name.encode("utf-8")]))
# [END bigtable_hw_get_by_key_happybase]
# [START bigtable_hw_scan_all_happybase]
- print('Scanning for all greetings:')
+ print("Scanning for all greetings:")
for key, row in table.scan():
- print('\t{}: {}'.format(key, row[column_name.encode('utf-8')]))
+ print("\t{}: {}".format(key, row[column_name.encode("utf-8")]))
# [END bigtable_hw_scan_all_happybase]
# [START bigtable_hw_delete_table_happybase]
- print('Deleting the {} table.'.format(table_name))
+ print("Deleting the {} table.".format(table_name))
connection.delete_table(table_name)
# [END bigtable_hw_delete_table_happybase]
@@ -102,17 +99,17 @@ def main(project_id, instance_id, table_name):
connection.close()
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = argparse.ArgumentParser(
- description=__doc__,
- formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('project_id', help='Your Cloud Platform project ID.')
+ description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+ parser.add_argument("project_id", help="Your Cloud Platform project ID.")
parser.add_argument(
- 'instance_id', help='ID of the Cloud Bigtable instance to connect to.')
+ "instance_id", help="ID of the Cloud Bigtable instance to connect to."
+ )
parser.add_argument(
- '--table',
- help='Table to create and destroy.',
- default='Hello-Bigtable')
+ "--table", help="Table to create and destroy.", default="Hello-Bigtable"
+ )
args = parser.parse_args()
main(args.project_id, args.instance_id, args.table)
diff --git a/samples/hello_happybase/main_test.py b/samples/hello_happybase/main_test.py
index f72fc0b2e..6a63750da 100644
--- a/samples/hello_happybase/main_test.py
+++ b/samples/hello_happybase/main_test.py
@@ -17,25 +17,21 @@
from main import main
-PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
-BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE']
-TABLE_NAME_FORMAT = 'hello-world-hb-test-{}'
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"]
+TABLE_NAME_FORMAT = "hello-world-hb-test-{}"
TABLE_NAME_RANGE = 10000
def test_main(capsys):
- table_name = TABLE_NAME_FORMAT.format(
- random.randrange(TABLE_NAME_RANGE))
- main(
- PROJECT,
- BIGTABLE_INSTANCE,
- table_name)
+ table_name = TABLE_NAME_FORMAT.format(random.randrange(TABLE_NAME_RANGE))
+ main(PROJECT, BIGTABLE_INSTANCE, table_name)
out, _ = capsys.readouterr()
- assert 'Creating the {} table.'.format(table_name) in out
- assert 'Writing some greetings to the table.' in out
- assert 'Getting a single greeting by row key.' in out
- assert 'Hello World!' in out
- assert 'Scanning for all greetings' in out
- assert 'Hello Cloud Bigtable!' in out
- assert 'Deleting the {} table.'.format(table_name) in out
+ assert "Creating the {} table.".format(table_name) in out
+ assert "Writing some greetings to the table." in out
+ assert "Getting a single greeting by row key." in out
+ assert "Hello World!" in out
+ assert "Scanning for all greetings" in out
+ assert "Hello Cloud Bigtable!" in out
+ assert "Deleting the {} table.".format(table_name) in out
diff --git a/samples/hello_happybase/noxfile.py b/samples/hello_happybase/noxfile.py
index ba55d7ce5..20cdfc620 100644
--- a/samples/hello_happybase/noxfile.py
+++ b/samples/hello_happybase/noxfile.py
@@ -14,9 +14,11 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -27,8 +29,9 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
-# Copy `noxfile_config.py` to your directory and modify it instead.
+BLACK_VERSION = "black==19.10b0"
+# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
@@ -37,24 +40,29 @@
TEST_CONFIG = {
    # You can opt out of testing on specific Python versions.
- 'ignored_versions': ["2.7"],
-
+ "ignored_versions": [],
+    # Old samples are opted out of enforcing Python type hints.
+    # All new samples should feature them.
+    "enforce_type_hints": False,
    # An envvar key for determining the project ID to use. Change it
    # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in to using a
    # build-specific Cloud project. You can also supply your own string
    # to use your own Cloud project.
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
- 'envs': {},
+ "envs": {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
- sys.path.append('.')
+ sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
@@ -64,36 +72,43 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
- env_key = TEST_CONFIG['gcloud_project_env']
+ env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
- ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
- ret.update(TEST_CONFIG['envs'])
+ ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
-# All versions used to tested samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+# All versions used to test samples.
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
-INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a Python version is missing
+nox.options.error_on_missing_interpreters = True
+
#
# Style Checks
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
    This is used when running the linter to ensure that import order is
@@ -131,60 +146,95 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
- "."
+ ".",
]
session.run("flake8", *args)
#
-# Sample Tests
+# Black
#
-PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
-def _session_tests(session, post_install=None):
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
- if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+#
+# Sample Tests
+#
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
- if post_install:
- post_install(session)
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars()
- )
+
+def _session_tests(
+    session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+    """Runs py.test for a particular project."""
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ test_list.extend(glob.glob("tests"))
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
+            # on Travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
- session.skip("SKIPPED: {} tests are disabled for this sample.".format(
- session.python
- ))
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
#
@@ -192,7 +242,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -201,6 +251,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
@@ -210,7 +265,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
diff --git a/samples/hello_happybase/requirements-test.txt b/samples/hello_happybase/requirements-test.txt
index 95ea1e6a0..4a46ff600 100644
--- a/samples/hello_happybase/requirements-test.txt
+++ b/samples/hello_happybase/requirements-test.txt
@@ -1 +1 @@
-pytest==6.2.4
+pytest==7.0.0
diff --git a/samples/instanceadmin/noxfile.py b/samples/instanceadmin/noxfile.py
index ba55d7ce5..20cdfc620 100644
--- a/samples/instanceadmin/noxfile.py
+++ b/samples/instanceadmin/noxfile.py
@@ -14,9 +14,11 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -27,8 +29,9 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
-# Copy `noxfile_config.py` to your directory and modify it instead.
+BLACK_VERSION = "black==19.10b0"
+# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
@@ -37,24 +40,29 @@
TEST_CONFIG = {
    # You can opt out of testing on specific Python versions.
- 'ignored_versions': ["2.7"],
-
+ "ignored_versions": [],
+    # Old samples are opted out of enforcing Python type hints.
+    # All new samples should feature them.
+    "enforce_type_hints": False,
    # An envvar key for determining the project ID to use. Change it
    # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in to using a
    # build-specific Cloud project. You can also supply your own string
    # to use your own Cloud project.
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
- 'envs': {},
+ "envs": {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
- sys.path.append('.')
+ sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
@@ -64,36 +72,43 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
- env_key = TEST_CONFIG['gcloud_project_env']
+ env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
- ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
- ret.update(TEST_CONFIG['envs'])
+ ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
-# All versions used to tested samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+# All versions used to test samples.
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
-INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a Python version is missing
+nox.options.error_on_missing_interpreters = True
+
#
# Style Checks
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
    This is used when running the linter to ensure that import order is
@@ -131,60 +146,95 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
- "."
+ ".",
]
session.run("flake8", *args)
#
-# Sample Tests
+# Black
#
-PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
-def _session_tests(session, post_install=None):
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
- if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+#
+# Sample Tests
+#
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
- if post_install:
- post_install(session)
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars()
- )
+
+def _session_tests(
+    session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+    """Runs py.test for a particular project."""
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ test_list.extend(glob.glob("tests"))
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
+            # on Travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
- session.skip("SKIPPED: {} tests are disabled for this sample.".format(
- session.python
- ))
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
#
@@ -192,7 +242,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -201,6 +251,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
@@ -210,7 +265,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
diff --git a/samples/instanceadmin/requirements-test.txt b/samples/instanceadmin/requirements-test.txt
index 95ea1e6a0..4a46ff600 100644
--- a/samples/instanceadmin/requirements-test.txt
+++ b/samples/instanceadmin/requirements-test.txt
@@ -1 +1 @@
-pytest==6.2.4
+pytest==7.0.0
diff --git a/samples/instanceadmin/requirements.txt b/samples/instanceadmin/requirements.txt
index 807a82ce3..844169f7b 100644
--- a/samples/instanceadmin/requirements.txt
+++ b/samples/instanceadmin/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.3.1
+google-cloud-bigtable==2.4.0
backoff==1.11.1
diff --git a/samples/instanceadmin/test_instanceadmin.py b/samples/instanceadmin/test_instanceadmin.py
index 929da10e4..b0041294b 100644
--- a/samples/instanceadmin/test_instanceadmin.py
+++ b/samples/instanceadmin/test_instanceadmin.py
@@ -97,17 +97,23 @@ def test_run_instance_operations(capsys, dispose_of):
def test_delete_instance(capsys, dispose_of):
- dispose_of(INSTANCE)
+ from concurrent.futures import TimeoutError
- # Can't delete it, it doesn't exist
- instanceadmin.delete_instance(PROJECT, INSTANCE)
- out = capsys.readouterr().out
- assert "Deleting instance" in out
- assert f"Instance {INSTANCE} does not exist" in out
+ @backoff.on_exception(backoff.expo, TimeoutError)
+ def _set_up_instance():
+ dispose_of(INSTANCE)
- # Ok, create it then
- instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1)
- capsys.readouterr() # throw away output
+ # Can't delete it, it doesn't exist
+ instanceadmin.delete_instance(PROJECT, INSTANCE)
+ out = capsys.readouterr().out
+ assert "Deleting instance" in out
+ assert f"Instance {INSTANCE} does not exist" in out
+
+ # Ok, create it then
+ instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1)
+ capsys.readouterr() # throw away output
+
+ _set_up_instance()
# Now delete it
instanceadmin.delete_instance(PROJECT, INSTANCE)
@@ -117,22 +123,29 @@ def test_delete_instance(capsys, dispose_of):
def test_add_and_delete_cluster(capsys, dispose_of):
- dispose_of(INSTANCE)
+ from concurrent.futures import TimeoutError
- # This won't work, because the instance isn't created yet
- instanceadmin.add_cluster(PROJECT, INSTANCE, CLUSTER2)
- out = capsys.readouterr().out
- assert f"Instance {INSTANCE} does not exist" in out
+ @backoff.on_exception(backoff.expo, TimeoutError)
+ def _set_up_instance():
+ dispose_of(INSTANCE)
- # Get the instance created
- instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1)
- capsys.readouterr() # throw away output
+ # This won't work, because the instance isn't created yet
+ instanceadmin.add_cluster(PROJECT, INSTANCE, CLUSTER2)
+ out = capsys.readouterr().out
+ assert f"Instance {INSTANCE} does not exist" in out
+
+ # Get the instance created
+ instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1)
+ capsys.readouterr() # throw away output
+
+ _set_up_instance()
# Add a cluster to that instance
# Avoid failing for "instance is currently being changed" by
# applying an exponential backoff
- w_backoff = backoff.on_exception(backoff.expo, exceptions.ServiceUnavailable)
- w_backoff(instanceadmin.add_cluster)(PROJECT, INSTANCE, CLUSTER2)
+ backoff_503 = backoff.on_exception(backoff.expo, exceptions.ServiceUnavailable)
+
+ backoff_503(instanceadmin.add_cluster)(PROJECT, INSTANCE, CLUSTER2)
out = capsys.readouterr().out
assert f"Adding cluster to instance {INSTANCE}" in out
assert "Listing clusters..." in out
diff --git a/samples/metricscaler/metricscaler.py b/samples/metricscaler/metricscaler.py
index 43b430859..d29e40a39 100644
--- a/samples/metricscaler/metricscaler.py
+++ b/samples/metricscaler/metricscaler.py
@@ -25,9 +25,9 @@
from google.cloud.bigtable import enums
from google.cloud.monitoring_v3 import query
-PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
-logger = logging.getLogger('bigtable.metricscaler')
+logger = logging.getLogger("bigtable.metricscaler")
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
@@ -40,12 +40,15 @@ def get_cpu_load(bigtable_instance, bigtable_cluster):
"""
# [START bigtable_cpu]
client = monitoring_v3.MetricServiceClient()
- cpu_query = query.Query(client,
- project=PROJECT,
- metric_type='bigtable.googleapis.com/'
- 'cluster/cpu_load',
- minutes=5)
- cpu_query = cpu_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster)
+ cpu_query = query.Query(
+ client,
+ project=PROJECT,
+ metric_type="bigtable.googleapis.com/" "cluster/cpu_load",
+ minutes=5,
+ )
+ cpu_query = cpu_query.select_resources(
+ instance=bigtable_instance, cluster=bigtable_cluster
+ )
cpu = next(cpu_query.iter())
return cpu.points[0].value.double_value
# [END bigtable_cpu]
@@ -59,12 +62,15 @@ def get_storage_utilization(bigtable_instance, bigtable_cluster):
"""
# [START bigtable_metric_scaler_storage_utilization]
client = monitoring_v3.MetricServiceClient()
- utilization_query = query.Query(client,
- project=PROJECT,
- metric_type='bigtable.googleapis.com/'
- 'cluster/storage_utilization',
- minutes=5)
- utilization_query = utilization_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster)
+ utilization_query = query.Query(
+ client,
+ project=PROJECT,
+ metric_type="bigtable.googleapis.com/" "cluster/storage_utilization",
+ minutes=5,
+ )
+ utilization_query = utilization_query.select_resources(
+ instance=bigtable_instance, cluster=bigtable_cluster
+ )
utilization = next(utilization_query.iter())
return utilization.points[0].value.double_value
# [END bigtable_metric_scaler_storage_utilization]
@@ -114,20 +120,24 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
if scale_up:
if current_node_count < max_node_count:
- new_node_count = min(
- current_node_count + size_change_step, max_node_count)
+ new_node_count = min(current_node_count + size_change_step, max_node_count)
cluster.serve_nodes = new_node_count
cluster.update()
- logger.info('Scaled up from {} to {} nodes.'.format(
- current_node_count, new_node_count))
+ logger.info(
+ "Scaled up from {} to {} nodes.".format(
+ current_node_count, new_node_count
+ )
+ )
else:
if current_node_count > min_node_count:
- new_node_count = max(
- current_node_count - size_change_step, min_node_count)
+ new_node_count = max(current_node_count - size_change_step, min_node_count)
cluster.serve_nodes = new_node_count
cluster.update()
- logger.info('Scaled down from {} to {} nodes.'.format(
- current_node_count, new_node_count))
+ logger.info(
+ "Scaled down from {} to {} nodes.".format(
+ current_node_count, new_node_count
+ )
+ )
# [END bigtable_scale]
@@ -138,7 +148,7 @@ def main(
low_cpu_threshold,
high_storage_threshold,
short_sleep,
- long_sleep
+ long_sleep,
):
"""Main loop runner that autoscales Cloud Bigtable.
@@ -154,8 +164,8 @@ def main(
"""
cluster_cpu = get_cpu_load(bigtable_instance, bigtable_cluster)
cluster_storage = get_storage_utilization(bigtable_instance, bigtable_cluster)
- logger.info('Detected cpu of {}'.format(cluster_cpu))
- logger.info('Detected storage utilization of {}'.format(cluster_storage))
+ logger.info("Detected cpu of {}".format(cluster_cpu))
+ logger.info("Detected storage utilization of {}".format(cluster_storage))
try:
if cluster_cpu > high_cpu_threshold or cluster_storage > high_storage_threshold:
scale_bigtable(bigtable_instance, bigtable_cluster, True)
@@ -165,44 +175,50 @@ def main(
scale_bigtable(bigtable_instance, bigtable_cluster, False)
time.sleep(long_sleep)
else:
- logger.info('CPU within threshold, sleeping.')
+ logger.info("CPU within threshold, sleeping.")
time.sleep(short_sleep)
except Exception as e:
logger.error("Error during scaling: %s", e)
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = argparse.ArgumentParser(
- description='Scales Cloud Bigtable clusters based on CPU usage.')
+ description="Scales Cloud Bigtable clusters based on CPU usage."
+ )
parser.add_argument(
- 'bigtable_instance',
- help='ID of the Cloud Bigtable instance to connect to.')
+ "bigtable_instance", help="ID of the Cloud Bigtable instance to connect to."
+ )
parser.add_argument(
- 'bigtable_cluster',
- help='ID of the Cloud Bigtable cluster to connect to.')
+ "bigtable_cluster", help="ID of the Cloud Bigtable cluster to connect to."
+ )
parser.add_argument(
- '--high_cpu_threshold',
- help='If Cloud Bigtable CPU usage is above this threshold, scale up',
- default=0.6)
+ "--high_cpu_threshold",
+ help="If Cloud Bigtable CPU usage is above this threshold, scale up",
+ default=0.6,
+ )
parser.add_argument(
- '--low_cpu_threshold',
- help='If Cloud Bigtable CPU usage is below this threshold, scale down',
- default=0.2)
+ "--low_cpu_threshold",
+ help="If Cloud Bigtable CPU usage is below this threshold, scale down",
+ default=0.2,
+ )
parser.add_argument(
- '--high_storage_threshold',
- help='If Cloud Bigtable storage utilization is above this threshold, '
- 'scale up',
- default=0.6)
+ "--high_storage_threshold",
+ help="If Cloud Bigtable storage utilization is above this threshold, "
+ "scale up",
+ default=0.6,
+ )
parser.add_argument(
- '--short_sleep',
- help='How long to sleep in seconds between checking metrics after no '
- 'scale operation',
- default=60)
+ "--short_sleep",
+ help="How long to sleep in seconds between checking metrics after no "
+ "scale operation",
+ default=60,
+ )
parser.add_argument(
- '--long_sleep',
- help='How long to sleep in seconds between checking metrics after a '
- 'scaling operation',
- default=60 * 10)
+ "--long_sleep",
+ help="How long to sleep in seconds between checking metrics after a "
+ "scaling operation",
+ default=60 * 10,
+ )
args = parser.parse_args()
while True:
@@ -213,4 +229,5 @@ def main(
float(args.low_cpu_threshold),
float(args.high_storage_threshold),
int(args.short_sleep),
- int(args.long_sleep))
+ int(args.long_sleep),
+ )
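Stripped of logging, the scale_bigtable arithmetic reformatted above is a clamp: step the node count up toward a ceiling or down toward a floor. A pure-function sketch of that invariant, where the step and limits are illustrative rather than the sample's configured constants:

# Pure-function sketch of the clamp in scale_bigtable; step and limits
# are illustrative.
def next_node_count(
    current: int, scale_up: bool, step: int = 3, lo: int = 3, hi: int = 30
) -> int:
    if scale_up:
        return min(current + step, hi)  # never exceed the ceiling
    return max(current - step, lo)      # never drop below the floor


assert next_node_count(29, scale_up=True) == 30   # clamped at hi
assert next_node_count(4, scale_up=False) == 3    # clamped at lo
assert next_node_count(10, scale_up=True) == 13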
diff --git a/samples/metricscaler/metricscaler_test.py b/samples/metricscaler/metricscaler_test.py
index 219ec535e..4420605ec 100644
--- a/samples/metricscaler/metricscaler_test.py
+++ b/samples/metricscaler/metricscaler_test.py
@@ -23,6 +23,7 @@
import pytest
from test_utils.retry import RetryInstanceState
+from test_utils.retry import RetryResult
from metricscaler import get_cpu_load
from metricscaler import get_storage_utilization
@@ -30,10 +31,10 @@
from metricscaler import scale_bigtable
-PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
-BIGTABLE_ZONE = os.environ['BIGTABLE_ZONE']
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_ZONE = os.environ["BIGTABLE_ZONE"]
SIZE_CHANGE_STEP = 3
-INSTANCE_ID_FORMAT = 'metric-scale-test-{}'
+INSTANCE_ID_FORMAT = "metric-scale-test-{}"
BIGTABLE_INSTANCE = INSTANCE_ID_FORMAT.format(str(uuid.uuid4())[:10])
BIGTABLE_DEV_INSTANCE = INSTANCE_ID_FORMAT.format(str(uuid.uuid4())[:10])
@@ -41,14 +42,14 @@
# System tests to verify API calls succeed
-@patch('metricscaler.query')
+@patch("metricscaler.query")
def test_get_cpu_load(monitoring_v3_query):
iter_mock = monitoring_v3_query.Query().select_resources().iter
iter_mock.return_value = iter([Mock(points=[Mock(value=Mock(double_value=1.0))])])
assert float(get_cpu_load(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE)) > 0.0
-@patch('metricscaler.query')
+@patch("metricscaler.query")
def test_get_storage_utilization(monitoring_v3_query):
iter_mock = monitoring_v3_query.Query().select_resources().iter
iter_mock.return_value = iter([Mock(points=[Mock(value=Mock(double_value=1.0))])])
@@ -64,16 +65,24 @@ def instance():
serve_nodes = 1
storage_type = enums.StorageType.SSD
production = enums.Instance.Type.PRODUCTION
- labels = {'prod-label': 'prod-label'}
- instance = client.instance(BIGTABLE_INSTANCE, instance_type=production,
- labels=labels)
+ labels = {"prod-label": "prod-label"}
+ instance = client.instance(
+ BIGTABLE_INSTANCE, instance_type=production, labels=labels
+ )
if not instance.exists():
- cluster = instance.cluster(cluster_id, location_id=BIGTABLE_ZONE,
- serve_nodes=serve_nodes,
- default_storage_type=storage_type)
+ cluster = instance.cluster(
+ cluster_id,
+ location_id=BIGTABLE_ZONE,
+ serve_nodes=serve_nodes,
+ default_storage_type=storage_type,
+ )
instance.create(clusters=[cluster])
+ # Eventual consistency check
+ retry_found = RetryResult(bool)
+ retry_found(instance.exists)()
+
yield
instance.delete()
@@ -87,21 +96,36 @@ def dev_instance():
storage_type = enums.StorageType.SSD
development = enums.Instance.Type.DEVELOPMENT
- labels = {'dev-label': 'dev-label'}
- instance = client.instance(BIGTABLE_DEV_INSTANCE,
- instance_type=development,
- labels=labels)
+ labels = {"dev-label": "dev-label"}
+ instance = client.instance(
+ BIGTABLE_DEV_INSTANCE, instance_type=development, labels=labels
+ )
if not instance.exists():
- cluster = instance.cluster(cluster_id, location_id=BIGTABLE_ZONE,
- default_storage_type=storage_type)
+ cluster = instance.cluster(
+ cluster_id, location_id=BIGTABLE_ZONE, default_storage_type=storage_type
+ )
instance.create(clusters=[cluster])
+ # Eventual consistency check
+ retry_found = RetryResult(bool)
+ retry_found(instance.exists)()
+
yield
instance.delete()
+class ClusterNodeCountPredicate:
+ def __init__(self, expected_node_count):
+ self.expected_node_count = expected_node_count
+
+ def __call__(self, cluster):
+ expected = self.expected_node_count
+ print(f"Expected node count: {expected}; found: {cluster.serve_nodes}")
+ return cluster.serve_nodes == expected
+
+
def test_scale_bigtable(instance):
bigtable_client = bigtable.Client(admin=True)
@@ -120,17 +144,22 @@ def test_scale_bigtable(instance):
scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True)
- expected_count = original_node_count + SIZE_CHANGE_STEP
+ scaled_node_count_predicate = ClusterNodeCountPredicate(
+ original_node_count + SIZE_CHANGE_STEP
+ )
+ scaled_node_count_predicate.__name__ = "scaled_node_count_predicate"
_scaled_node_count = RetryInstanceState(
- instance_predicate=lambda c: c.serve_nodes == expected_count,
+ instance_predicate=scaled_node_count_predicate,
max_tries=10,
)
_scaled_node_count(cluster.reload)()
scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False)
+ restored_node_count_predicate = ClusterNodeCountPredicate(original_node_count)
+ restored_node_count_predicate.__name__ = "restored_node_count_predicate"
_restored_node_count = RetryInstanceState(
- instance_predicate=lambda c: c.serve_nodes == original_node_count,
+ instance_predicate=restored_node_count_predicate,
max_tries=10,
)
_restored_node_count(cluster.reload)()
@@ -141,10 +170,10 @@ def test_handle_dev_instance(capsys, dev_instance):
scale_bigtable(BIGTABLE_DEV_INSTANCE, BIGTABLE_DEV_INSTANCE, True)
-@patch('time.sleep')
-@patch('metricscaler.get_storage_utilization')
-@patch('metricscaler.get_cpu_load')
-@patch('metricscaler.scale_bigtable')
+@patch("time.sleep")
+@patch("metricscaler.get_storage_utilization")
+@patch("metricscaler.get_cpu_load")
+@patch("metricscaler.scale_bigtable")
def test_main(scale_bigtable, get_cpu_load, get_storage_utilization, sleep):
SHORT_SLEEP = 5
LONG_SLEEP = 10
@@ -153,57 +182,46 @@ def test_main(scale_bigtable, get_cpu_load, get_storage_utilization, sleep):
get_cpu_load.return_value = 0.5
get_storage_utilization.return_value = 0.5
- main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP,
- LONG_SLEEP)
+ main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP)
scale_bigtable.assert_not_called()
scale_bigtable.reset_mock()
# Test high CPU, okay storage utilization
get_cpu_load.return_value = 0.7
get_storage_utilization.return_value = 0.5
- main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP,
- LONG_SLEEP)
- scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
- BIGTABLE_INSTANCE, True)
+ main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP)
+ scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True)
scale_bigtable.reset_mock()
# Test low CPU, okay storage utilization
get_storage_utilization.return_value = 0.5
get_cpu_load.return_value = 0.2
- main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP,
- LONG_SLEEP)
- scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
- BIGTABLE_INSTANCE, False)
+ main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP)
+ scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False)
scale_bigtable.reset_mock()
# Test okay CPU, high storage utilization
get_cpu_load.return_value = 0.5
get_storage_utilization.return_value = 0.7
- main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP,
- LONG_SLEEP)
- scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
- BIGTABLE_INSTANCE, True)
+ main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP)
+ scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True)
scale_bigtable.reset_mock()
# Test high CPU, high storage utilization
get_cpu_load.return_value = 0.7
get_storage_utilization.return_value = 0.7
- main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP,
- LONG_SLEEP)
- scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
- BIGTABLE_INSTANCE, True)
+ main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP)
+ scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True)
scale_bigtable.reset_mock()
# Test low CPU, high storage utilization
get_cpu_load.return_value = 0.2
get_storage_utilization.return_value = 0.7
- main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP,
- LONG_SLEEP)
- scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE,
- BIGTABLE_INSTANCE, True)
+ main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP)
+ scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True)
scale_bigtable.reset_mock()
-if __name__ == '__main__':
+if __name__ == "__main__":
test_get_cpu_load()
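
Note: the reflowed assertions above pin down the scaling policy test_main
expects from metricscaler: high storage utilization forces a scale-up even when
CPU is low, high CPU alone also scales up, and a scale-down happens only when
both metrics are low. A minimal sketch of that decision logic, with
illustrative names rather than metricscaler's actual internals:

    def should_scale(cpu, storage, high_cpu=0.6, low_cpu=0.3, high_storage=0.6):
        # Either resource under pressure -> add nodes; storage outranks low CPU.
        if cpu > high_cpu or storage > high_storage:
            return True    # scale up
        if cpu < low_cpu:
            return False   # scale down
        return None        # leave the cluster as-is

Every assert_called_once_with in the test above is consistent with this
decision table.
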
diff --git a/samples/metricscaler/noxfile.py b/samples/metricscaler/noxfile.py
index ba55d7ce5..20cdfc620 100644
--- a/samples/metricscaler/noxfile.py
+++ b/samples/metricscaler/noxfile.py
@@ -14,9 +14,11 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -27,8 +29,9 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
-# Copy `noxfile_config.py` to your directory and modify it instead.
+BLACK_VERSION = "black==19.10b0"
+# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
@@ -37,24 +40,29 @@
TEST_CONFIG = {
# You can opt out from the test for specific Python versions.
- 'ignored_versions': ["2.7"],
-
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints;
+ # all new samples should include them.
+ "enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
- 'envs': {},
+ "envs": {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
- sys.path.append('.')
+ sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
@@ -64,36 +72,43 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
- env_key = TEST_CONFIG['gcloud_project_env']
+ env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
- ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
- ret.update(TEST_CONFIG['envs'])
+ ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
-# All versions used to tested samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+# All versions used to test samples.
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
-INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a Python version is missing
+nox.options.error_on_missing_interpreters = True
+
#
# Style Checks
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
This is used when running the linter to insure that import order is
@@ -131,60 +146,95 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
- "."
+ ".",
]
session.run("flake8", *args)
#
-# Sample Tests
+# Black
#
-PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
-def _session_tests(session, post_install=None):
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
- if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+#
+# Sample Tests
+#
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
- if post_install:
- post_install(session)
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars()
- )
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ test_list.extend(glob.glob("tests"))
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
- # on Travis, where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
- session.skip("SKIPPED: {} tests are disabled for this sample.".format(
- session.python
- ))
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
#
@@ -192,7 +242,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -201,6 +251,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
@@ -210,7 +265,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
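
Note: the INSTALL_LIBRARY_FROM_SOURCE change in this noxfile (repeated in each
sample's noxfile below) fixes a truthiness bug: environment variables are
strings, and bool() of any non-empty string is True, so even
INSTALL_LIBRARY_FROM_SOURCE=false used to enable the flag. A quick
demonstration, as an assumed standalone session rather than repo code:

    import os

    os.environ["INSTALL_LIBRARY_FROM_SOURCE"] = "false"
    bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
    # -> True (old check: wrong)
    os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true")
    # -> False (new check: correct)
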
diff --git a/samples/metricscaler/requirements-test.txt b/samples/metricscaler/requirements-test.txt
index 7903fa1e1..2d5a435bd 100644
--- a/samples/metricscaler/requirements-test.txt
+++ b/samples/metricscaler/requirements-test.txt
@@ -1,3 +1,3 @@
-pytest==6.2.4
+pytest==7.0.0
mock==4.0.3
google-cloud-testutils
diff --git a/samples/metricscaler/requirements.txt b/samples/metricscaler/requirements.txt
index 8a9f48af8..2e1843a99 100644
--- a/samples/metricscaler/requirements.txt
+++ b/samples/metricscaler/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.3.1
-google-cloud-monitoring==2.4.2
+google-cloud-bigtable==2.4.0
+google-cloud-monitoring==2.8.0
diff --git a/samples/quickstart/main.py b/samples/quickstart/main.py
index 3763296f1..50bfe6394 100644
--- a/samples/quickstart/main.py
+++ b/samples/quickstart/main.py
@@ -20,8 +20,7 @@
from google.cloud import bigtable
-def main(project_id="project-id", instance_id="instance-id",
- table_id="my-table"):
+def main(project_id="project-id", instance_id="instance-id", table_id="my-table"):
# Create a Cloud Bigtable client.
client = bigtable.Client(project=project_id)
@@ -31,27 +30,27 @@ def main(project_id="project-id", instance_id="instance-id",
# Open an existing table.
table = instance.table(table_id)
- row_key = 'r1'
- row = table.read_row(row_key.encode('utf-8'))
+ row_key = "r1"
+ row = table.read_row(row_key.encode("utf-8"))
- column_family_id = 'cf1'
- column_id = 'c1'.encode('utf-8')
- value = row.cells[column_family_id][column_id][0].value.decode('utf-8')
+ column_family_id = "cf1"
+ column_id = "c1".encode("utf-8")
+ value = row.cells[column_family_id][column_id][0].value.decode("utf-8")
- print('Row key: {}\nData: {}'.format(row_key, value))
+ print("Row key: {}\nData: {}".format(row_key, value))
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = argparse.ArgumentParser(
- description=__doc__,
- formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('project_id', help='Your Cloud Platform project ID.')
+ description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+ parser.add_argument("project_id", help="Your Cloud Platform project ID.")
parser.add_argument(
- 'instance_id', help='ID of the Cloud Bigtable instance to connect to.')
+ "instance_id", help="ID of the Cloud Bigtable instance to connect to."
+ )
parser.add_argument(
- '--table',
- help='Existing table used in the quickstart.',
- default='my-table')
+ "--table", help="Existing table used in the quickstart.", default="my-table"
+ )
args = parser.parse_args()
main(args.project_id, args.instance_id, args.table)
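
Note: the quickstart's CLI is unchanged by the Black reformatting. Assuming an
existing table that already holds a cf1:c1 cell under row key r1 (the IDs below
are illustrative), it is invoked as:

    python main.py my-project-id my-instance-id --table my-table
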
diff --git a/samples/quickstart/main_test.py b/samples/quickstart/main_test.py
index ea1e8776b..46d578b6b 100644
--- a/samples/quickstart/main_test.py
+++ b/samples/quickstart/main_test.py
@@ -21,9 +21,9 @@
from main import main
-PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
-BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE']
-TABLE_ID_FORMAT = 'quickstart-test-{}'
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"]
+TABLE_ID_FORMAT = "quickstart-test-{}"
@pytest.fixture()
@@ -32,7 +32,7 @@ def table():
client = bigtable.Client(project=PROJECT, admin=True)
instance = client.instance(BIGTABLE_INSTANCE)
table = instance.table(table_id)
- column_family_id = 'cf1'
+ column_family_id = "cf1"
column_families = {column_family_id: None}
table.create(column_families=column_families)
@@ -50,4 +50,4 @@ def test_main(capsys, table):
main(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
- assert 'Row key: r1\nData: test-value\n' in out
+ assert "Row key: r1\nData: test-value\n" in out
diff --git a/samples/quickstart/noxfile.py b/samples/quickstart/noxfile.py
index ba55d7ce5..20cdfc620 100644
--- a/samples/quickstart/noxfile.py
+++ b/samples/quickstart/noxfile.py
@@ -14,9 +14,11 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -27,8 +29,9 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
-# Copy `noxfile_config.py` to your directory and modify it instead.
+BLACK_VERSION = "black==19.10b0"
+# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
@@ -37,24 +40,29 @@
TEST_CONFIG = {
# You can opt out from the test for specific Python versions.
- 'ignored_versions': ["2.7"],
-
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints;
+ # all new samples should include them.
+ "enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
- 'envs': {},
+ "envs": {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
- sys.path.append('.')
+ sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
@@ -64,36 +72,43 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
- env_key = TEST_CONFIG['gcloud_project_env']
+ env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
- ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
- ret.update(TEST_CONFIG['envs'])
+ ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
-# All versions used to tested samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+# All versions used to test samples.
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
-INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a Python version is missing
+nox.options.error_on_missing_interpreters = True
+
#
# Style Checks
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
This is used when running the linter to insure that import order is
@@ -131,60 +146,95 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
- "."
+ ".",
]
session.run("flake8", *args)
#
-# Sample Tests
+# Black
#
-PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
-def _session_tests(session, post_install=None):
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
- if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+#
+# Sample Tests
+#
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
- if post_install:
- post_install(session)
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars()
- )
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ test_list.extend(glob.glob("tests"))
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
- # on Travis, where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
- session.skip("SKIPPED: {} tests are disabled for this sample.".format(
- session.python
- ))
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
#
@@ -192,7 +242,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -201,6 +251,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
@@ -210,7 +265,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
diff --git a/samples/quickstart/requirements-test.txt b/samples/quickstart/requirements-test.txt
index 95ea1e6a0..4a46ff600 100644
--- a/samples/quickstart/requirements-test.txt
+++ b/samples/quickstart/requirements-test.txt
@@ -1 +1 @@
-pytest==6.2.4
+pytest==7.0.0
diff --git a/samples/quickstart/requirements.txt b/samples/quickstart/requirements.txt
index 5197d54ba..73d64741d 100644
--- a/samples/quickstart/requirements.txt
+++ b/samples/quickstart/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.3.1
+google-cloud-bigtable==2.4.0
diff --git a/samples/quickstart_happybase/main.py b/samples/quickstart_happybase/main.py
index 056e3666b..6a05c4cbd 100644
--- a/samples/quickstart_happybase/main.py
+++ b/samples/quickstart_happybase/main.py
@@ -20,8 +20,7 @@
from google.cloud import happybase
-def main(project_id="project-id", instance_id="instance-id",
- table_id="my-table"):
+def main(project_id="project-id", instance_id="instance-id", table_id="my-table"):
# Creates a Bigtable client
client = bigtable.Client(project=project_id)
@@ -34,28 +33,28 @@ def main(project_id="project-id", instance_id="instance-id",
# Connect to an existing table:my-table
table = connection.table(table_id)
- key = 'r1'
- row = table.row(key.encode('utf-8'))
+ key = "r1"
+ row = table.row(key.encode("utf-8"))
- column = 'cf1:c1'.encode('utf-8')
- value = row[column].decode('utf-8')
- print('Row key: {}\nData: {}'.format(key, value))
+ column = "cf1:c1".encode("utf-8")
+ value = row[column].decode("utf-8")
+ print("Row key: {}\nData: {}".format(key, value))
finally:
connection.close()
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = argparse.ArgumentParser(
- description=__doc__,
- formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('project_id', help='Your Cloud Platform project ID.')
+ description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+ parser.add_argument("project_id", help="Your Cloud Platform project ID.")
parser.add_argument(
- 'instance_id', help='ID of the Cloud Bigtable instance to connect to.')
+ "instance_id", help="ID of the Cloud Bigtable instance to connect to."
+ )
parser.add_argument(
- '--table',
- help='Existing table used in the quickstart.',
- default='my-table')
+ "--table", help="Existing table used in the quickstart.", default="my-table"
+ )
args = parser.parse_args()
main(args.project_id, args.instance_id, args.table)
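
Note: the happybase quickstart addresses cells HBase-style, with family and
qualifier fused into one bytes key, while the native client keys them
separately. A side-by-side sketch (variable names illustrative):

    # google-cloud-happybase: one b"family:qualifier" key per cell
    value = row[b"cf1:c1"].decode("utf-8")

    # native google-cloud-bigtable: family dict, then qualifier dict
    value = row.cells["cf1"][b"c1"][0].value.decode("utf-8")
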
diff --git a/samples/quickstart_happybase/main_test.py b/samples/quickstart_happybase/main_test.py
index 26afa6d6b..dc62ebede 100644
--- a/samples/quickstart_happybase/main_test.py
+++ b/samples/quickstart_happybase/main_test.py
@@ -21,9 +21,9 @@
from main import main
-PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
-BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE']
-TABLE_ID_FORMAT = 'quickstart-hb-test-{}'
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"]
+TABLE_ID_FORMAT = "quickstart-hb-test-{}"
@pytest.fixture()
@@ -32,7 +32,7 @@ def table():
client = bigtable.Client(project=PROJECT, admin=True)
instance = client.instance(BIGTABLE_INSTANCE)
table = instance.table(table_id)
- column_family_id = 'cf1'
+ column_family_id = "cf1"
column_families = {column_family_id: None}
table.create(column_families=column_families)
@@ -50,4 +50,4 @@ def test_main(capsys, table):
main(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
- assert 'Row key: r1\nData: test-value\n' in out
+ assert "Row key: r1\nData: test-value\n" in out
diff --git a/samples/quickstart_happybase/noxfile.py b/samples/quickstart_happybase/noxfile.py
index ba55d7ce5..20cdfc620 100644
--- a/samples/quickstart_happybase/noxfile.py
+++ b/samples/quickstart_happybase/noxfile.py
@@ -14,9 +14,11 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -27,8 +29,9 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
-# Copy `noxfile_config.py` to your directory and modify it instead.
+BLACK_VERSION = "black==19.10b0"
+# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
@@ -37,24 +40,29 @@
TEST_CONFIG = {
# You can opt out from the test for specific Python versions.
- 'ignored_versions': ["2.7"],
-
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints;
+ # all new samples should include them.
+ "enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
- 'envs': {},
+ "envs": {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
- sys.path.append('.')
+ sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
@@ -64,36 +72,43 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
- env_key = TEST_CONFIG['gcloud_project_env']
+ env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
- ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
- ret.update(TEST_CONFIG['envs'])
+ ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
-# All versions used to tested samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+# All versions used to test samples.
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
-INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a Python version is missing
+nox.options.error_on_missing_interpreters = True
+
#
# Style Checks
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
This is used when running the linter to insure that import order is
@@ -131,60 +146,95 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
- "."
+ ".",
]
session.run("flake8", *args)
#
-# Sample Tests
+# Black
#
-PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
-def _session_tests(session, post_install=None):
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
- if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+#
+# Sample Tests
+#
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
- if post_install:
- post_install(session)
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars()
- )
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ test_list.extend(glob.glob("tests"))
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
- # on Travis, where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
- session.skip("SKIPPED: {} tests are disabled for this sample.".format(
- session.python
- ))
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
#
@@ -192,7 +242,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -201,6 +251,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
@@ -210,7 +265,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
diff --git a/samples/quickstart_happybase/requirements-test.txt b/samples/quickstart_happybase/requirements-test.txt
index 95ea1e6a0..4a46ff600 100644
--- a/samples/quickstart_happybase/requirements-test.txt
+++ b/samples/quickstart_happybase/requirements-test.txt
@@ -1 +1 @@
-pytest==6.2.4
+pytest==7.0.0
diff --git a/samples/snippets/filters/filter_snippets.py b/samples/snippets/filters/filter_snippets.py
index c815eae99..4211378f3 100644
--- a/samples/snippets/filters/filter_snippets.py
+++ b/samples/snippets/filters/filter_snippets.py
@@ -29,7 +29,7 @@ def filter_limit_row_sample(project_id, instance_id, table_id):
instance = client.instance(instance_id)
table = instance.table(table_id)
- rows = table.read_rows(filter_=row_filters.RowSampleFilter(.75))
+ rows = table.read_rows(filter_=row_filters.RowSampleFilter(0.75))
for row in rows:
print_row(row)
@@ -42,7 +42,8 @@ def filter_limit_row_regex(project_id, instance_id, table_id):
table = instance.table(table_id)
rows = table.read_rows(
- filter_=row_filters.RowKeyRegexFilter(".*#20190501$".encode("utf-8")))
+ filter_=row_filters.RowKeyRegexFilter(".*#20190501$".encode("utf-8"))
+ )
for row in rows:
print_row(row)
@@ -91,7 +92,8 @@ def filter_limit_col_family_regex(project_id, instance_id, table_id):
table = instance.table(table_id)
rows = table.read_rows(
- filter_=row_filters.FamilyNameRegexFilter("stats_.*$".encode("utf-8")))
+ filter_=row_filters.FamilyNameRegexFilter("stats_.*$".encode("utf-8"))
+ )
for row in rows:
print_row(row)
@@ -104,8 +106,8 @@ def filter_limit_col_qualifier_regex(project_id, instance_id, table_id):
table = instance.table(table_id)
rows = table.read_rows(
- filter_=row_filters.ColumnQualifierRegexFilter(
- "connected_.*$".encode("utf-8")))
+ filter_=row_filters.ColumnQualifierRegexFilter("connected_.*$".encode("utf-8"))
+ )
for row in rows:
print_row(row)
@@ -118,10 +120,10 @@ def filter_limit_col_range(project_id, instance_id, table_id):
table = instance.table(table_id)
rows = table.read_rows(
- filter_=row_filters.ColumnRangeFilter("cell_plan",
- b"data_plan_01gb",
- b"data_plan_10gb",
- inclusive_end=False))
+ filter_=row_filters.ColumnRangeFilter(
+ "cell_plan", b"data_plan_01gb", b"data_plan_10gb", inclusive_end=False
+ )
+ )
for row in rows:
print_row(row)
@@ -134,7 +136,8 @@ def filter_limit_value_range(project_id, instance_id, table_id):
table = instance.table(table_id)
rows = table.read_rows(
- filter_=row_filters.ValueRangeFilter(b"PQ2A.190405", b"PQ2A.190406"))
+ filter_=row_filters.ValueRangeFilter(b"PQ2A.190405", b"PQ2A.190406")
+ )
for row in rows:
print_row(row)
@@ -150,7 +153,8 @@ def filter_limit_value_regex(project_id, instance_id, table_id):
table = instance.table(table_id)
rows = table.read_rows(
- filter_=row_filters.ValueRegexFilter("PQ2A.*$".encode("utf-8")))
+ filter_=row_filters.ValueRegexFilter("PQ2A.*$".encode("utf-8"))
+ )
for row in rows:
print_row(row)
@@ -165,8 +169,8 @@ def filter_limit_timestamp_range(project_id, instance_id, table_id):
end = datetime.datetime(2019, 5, 1)
rows = table.read_rows(
- filter_=row_filters.TimestampRangeFilter(
- row_filters.TimestampRange(end=end)))
+ filter_=row_filters.TimestampRangeFilter(row_filters.TimestampRange(end=end))
+ )
for row in rows:
print_row(row)
@@ -202,8 +206,7 @@ def filter_modify_strip_value(project_id, instance_id, table_id):
instance = client.instance(instance_id)
table = instance.table(table_id)
- rows = table.read_rows(
- filter_=row_filters.StripValueTransformerFilter(True))
+ rows = table.read_rows(filter_=row_filters.StripValueTransformerFilter(True))
for row in rows:
print_row(row)
@@ -215,8 +218,7 @@ def filter_modify_apply_label(project_id, instance_id, table_id):
instance = client.instance(instance_id)
table = instance.table(table_id)
- rows = table.read_rows(
- filter_=row_filters.ApplyLabelFilter(label="labelled"))
+ rows = table.read_rows(filter_=row_filters.ApplyLabelFilter(label="labelled"))
for row in rows:
print_row(row)
@@ -228,9 +230,14 @@ def filter_composing_chain(project_id, instance_id, table_id):
instance = client.instance(instance_id)
table = instance.table(table_id)
- rows = table.read_rows(filter_=row_filters.RowFilterChain(
- filters=[row_filters.CellsColumnLimitFilter(1),
- row_filters.FamilyNameRegexFilter("cell_plan")]))
+ rows = table.read_rows(
+ filter_=row_filters.RowFilterChain(
+ filters=[
+ row_filters.CellsColumnLimitFilter(1),
+ row_filters.FamilyNameRegexFilter("cell_plan"),
+ ]
+ )
+ )
for row in rows:
print_row(row)
@@ -242,9 +249,14 @@ def filter_composing_interleave(project_id, instance_id, table_id):
instance = client.instance(instance_id)
table = instance.table(table_id)
- rows = table.read_rows(filter_=row_filters.RowFilterUnion(
- filters=[row_filters.ValueRegexFilter("true"),
- row_filters.ColumnQualifierRegexFilter("os_build")]))
+ rows = table.read_rows(
+ filter_=row_filters.RowFilterUnion(
+ filters=[
+ row_filters.ValueRegexFilter("true"),
+ row_filters.ColumnQualifierRegexFilter("os_build"),
+ ]
+ )
+ )
for row in rows:
print_row(row)
@@ -256,16 +268,18 @@ def filter_composing_condition(project_id, instance_id, table_id):
instance = client.instance(instance_id)
table = instance.table(table_id)
- rows = table.read_rows(filter_=row_filters.ConditionalRowFilter(
- base_filter=row_filters.RowFilterChain(filters=[
- row_filters.ColumnQualifierRegexFilter(
- "data_plan_10gb"),
- row_filters.ValueRegexFilter(
- "true")]),
- true_filter=row_filters.ApplyLabelFilter(label="passed-filter"),
- false_filter=row_filters.ApplyLabelFilter(label="filtered-out")
-
- ))
+ rows = table.read_rows(
+ filter_=row_filters.ConditionalRowFilter(
+ base_filter=row_filters.RowFilterChain(
+ filters=[
+ row_filters.ColumnQualifierRegexFilter("data_plan_10gb"),
+ row_filters.ValueRegexFilter("true"),
+ ]
+ ),
+ true_filter=row_filters.ApplyLabelFilter(label="passed-filter"),
+ false_filter=row_filters.ApplyLabelFilter(label="filtered-out"),
+ )
+ )
for row in rows:
print_row(row)
@@ -275,16 +289,23 @@ def filter_composing_condition(project_id, instance_id, table_id):
def print_row(row):
- print("Reading data for {}:".format(row.row_key.decode('utf-8')))
+ print("Reading data for {}:".format(row.row_key.decode("utf-8")))
for cf, cols in sorted(row.cells.items()):
print("Column Family {}".format(cf))
for col, cells in sorted(cols.items()):
for cell in cells:
- labels = " [{}]".format(",".join(cell.labels)) \
- if len(cell.labels) else ""
+ labels = (
+ " [{}]".format(",".join(cell.labels)) if len(cell.labels) else ""
+ )
print(
- "\t{}: {} @{}{}".format(col.decode('utf-8'),
- cell.value.decode('utf-8'),
- cell.timestamp, labels))
+ "\t{}: {} @{}{}".format(
+ col.decode("utf-8"),
+ cell.value.decode("utf-8"),
+ cell.timestamp,
+ labels,
+ )
+ )
print("")
+
+
# [END bigtable_filters_print]
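
Note: every hunk in filter_snippets.py is a pure Black reformat; the filters
built are identical. For filter_composing_condition in particular, the
re-nested construction reads, per row, roughly as (illustrative paraphrase,
not client API):

    # label = "passed-filter" if (qualifier matches "data_plan_10gb"
    #                             and value matches "true")
    #         else "filtered-out"

Any of the snippets can be smoke-run against an existing table with the sample
schema, e.g. (IDs assumed):

    from filter_snippets import filter_composing_condition
    filter_composing_condition("my-project", "my-instance", "mobile-time-series")
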
diff --git a/samples/snippets/filters/filters_test.py b/samples/snippets/filters/filters_test.py
index 36dc4a5b1..35cf62ff0 100644
--- a/samples/snippets/filters/filters_test.py
+++ b/samples/snippets/filters/filters_test.py
@@ -23,9 +23,9 @@
import filter_snippets
-PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
-BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE']
-TABLE_ID_PREFIX = 'mobile-time-series-{}'
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"]
+TABLE_ID_PREFIX = "mobile-time-series-{}"
@pytest.fixture(scope="module", autouse=True)
@@ -40,11 +40,10 @@ def table_id():
if table.exists():
table.delete()
- table.create(column_families={'stats_summary': None, 'cell_plan': None})
+ table.create(column_families={"stats_summary": None, "cell_plan": None})
timestamp = datetime.datetime(2019, 5, 1)
- timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(
- hours=1)
+ timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(hours=1)
row_keys = [
"phone#4c410523#20190501",
@@ -99,98 +98,88 @@ def table_id():
def test_filter_limit_row_sample(capsys, snapshot, table_id):
- filter_snippets.filter_limit_row_sample(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_row_sample(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
- assert 'Reading data for' in out
+ assert "Reading data for" in out
def test_filter_limit_row_regex(capsys, snapshot, table_id):
- filter_snippets.filter_limit_row_regex(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_row_regex(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_limit_cells_per_col(capsys, snapshot, table_id):
- filter_snippets.filter_limit_cells_per_col(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_cells_per_col(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_limit_cells_per_row(capsys, snapshot, table_id):
- filter_snippets.filter_limit_cells_per_row(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_cells_per_row(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_limit_cells_per_row_offset(capsys, snapshot, table_id):
- filter_snippets.filter_limit_cells_per_row_offset(PROJECT,
- BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_cells_per_row_offset(
+ PROJECT, BIGTABLE_INSTANCE, table_id
+ )
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_limit_col_family_regex(capsys, snapshot, table_id):
- filter_snippets.filter_limit_col_family_regex(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_col_family_regex(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_limit_col_qualifier_regex(capsys, snapshot, table_id):
- filter_snippets.filter_limit_col_qualifier_regex(PROJECT,
- BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_col_qualifier_regex(
+ PROJECT, BIGTABLE_INSTANCE, table_id
+ )
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_limit_col_range(capsys, snapshot, table_id):
- filter_snippets.filter_limit_col_range(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_col_range(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_limit_value_range(capsys, snapshot, table_id):
- filter_snippets.filter_limit_value_range(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_value_range(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_limit_value_regex(capsys, snapshot, table_id):
- filter_snippets.filter_limit_value_regex(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_value_regex(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_limit_timestamp_range(capsys, snapshot, table_id):
- filter_snippets.filter_limit_timestamp_range(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_timestamp_range(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_limit_block_all(capsys, snapshot, table_id):
- filter_snippets.filter_limit_block_all(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_limit_block_all(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
@@ -204,40 +193,35 @@ def test_filter_limit_pass_all(capsys, snapshot, table_id):
def test_filter_modify_strip_value(capsys, snapshot, table_id):
- filter_snippets.filter_modify_strip_value(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_modify_strip_value(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_modify_apply_label(capsys, snapshot, table_id):
- filter_snippets.filter_modify_apply_label(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_modify_apply_label(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_composing_chain(capsys, snapshot, table_id):
- filter_snippets.filter_composing_chain(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_composing_chain(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_composing_interleave(capsys, snapshot, table_id):
- filter_snippets.filter_composing_interleave(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_composing_interleave(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
def test_filter_composing_condition(capsys, snapshot, table_id):
- filter_snippets.filter_composing_condition(PROJECT, BIGTABLE_INSTANCE,
- table_id)
+ filter_snippets.filter_composing_condition(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
snapshot.assert_match(out)
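
Note: these tests compare captured stdout against snapshots stored by the
snapshottest package (conventionally under snapshots/snap_filters_test.py).
When snippet output legitimately changes, the stored snapshots are regenerated
rather than hand-edited; with snapshottest's pytest plugin that is:

    pytest --snapshot-update filters_test.py

(invocation assumed from snapshottest's documented flag).
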
diff --git a/samples/snippets/filters/noxfile.py b/samples/snippets/filters/noxfile.py
index ba55d7ce5..20cdfc620 100644
--- a/samples/snippets/filters/noxfile.py
+++ b/samples/snippets/filters/noxfile.py
@@ -14,9 +14,11 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -27,8 +29,9 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
-# Copy `noxfile_config.py` to your directory and modify it instead.
+BLACK_VERSION = "black==19.10b0"
+# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
@@ -37,24 +40,29 @@
TEST_CONFIG = {
# You can opt out from the test for specific Python versions.
- 'ignored_versions': ["2.7"],
-
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints;
+ # all new samples should include them.
+ "enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
- 'envs': {},
+ "envs": {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
- sys.path.append('.')
+ sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
@@ -64,36 +72,43 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
- env_key = TEST_CONFIG['gcloud_project_env']
+ env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
- ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
- ret.update(TEST_CONFIG['envs'])
+ ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
-# All versions used to tested samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+# All versions used to test samples.
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
-INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a Python version is missing
+nox.options.error_on_missing_interpreters = True
+
#
# Style Checks
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
This is used when running the linter to insure that import order is
@@ -131,60 +146,95 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
- "."
+ ".",
]
session.run("flake8", *args)
#
-# Sample Tests
+# Black
#
-PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
-def _session_tests(session, post_install=None):
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
- if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+#
+# Sample Tests
+#
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
- if post_install:
- post_install(session)
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars()
- )
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ test_list.extend(glob.glob("tests"))
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
- # on Travis, where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
- session.skip("SKIPPED: {} tests are disabled for this sample.".format(
- session.python
- ))
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
#
@@ -192,7 +242,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -201,6 +251,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
@@ -210,7 +265,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
diff --git a/samples/snippets/filters/requirements-test.txt b/samples/snippets/filters/requirements-test.txt
index 95ea1e6a0..4a46ff600 100644
--- a/samples/snippets/filters/requirements-test.txt
+++ b/samples/snippets/filters/requirements-test.txt
@@ -1 +1 @@
-pytest==6.2.4
+pytest==7.0.0
diff --git a/samples/snippets/filters/requirements.txt b/samples/snippets/filters/requirements.txt
index 83fd1d5e2..d2916abfc 100644
--- a/samples/snippets/filters/requirements.txt
+++ b/samples/snippets/filters/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.3.1
+google-cloud-bigtable==2.4.0
snapshottest==0.6.0
\ No newline at end of file
diff --git a/samples/snippets/reads/noxfile.py b/samples/snippets/reads/noxfile.py
index ba55d7ce5..20cdfc620 100644
--- a/samples/snippets/reads/noxfile.py
+++ b/samples/snippets/reads/noxfile.py
@@ -14,9 +14,11 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -27,8 +29,9 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
-# Copy `noxfile_config.py` to your directory and modify it instead.
+BLACK_VERSION = "black==19.10b0"
+# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
@@ -37,24 +40,29 @@
TEST_CONFIG = {
# You can opt out from the test for specific Python versions.
- 'ignored_versions': ["2.7"],
-
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints;
+ # all new samples should include them.
+ "enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
- 'envs': {},
+ "envs": {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
- sys.path.append('.')
+ sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
@@ -64,36 +72,43 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
- env_key = TEST_CONFIG['gcloud_project_env']
+ env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
- ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
- ret.update(TEST_CONFIG['envs'])
+ ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
-# All versions used to tested samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+# All versions used to test samples.
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
-INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a Python version is missing
+nox.options.error_on_missing_interpreters = True
+
#
# Style Checks
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
This is used when running the linter to insure that import order is
@@ -131,60 +146,95 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
- "."
+ ".",
]
session.run("flake8", *args)
#
-# Sample Tests
+# Black
#
-PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
-def _session_tests(session, post_install=None):
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
- if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+#
+# Sample Tests
+#
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
- if post_install:
- post_install(session)
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars()
- )
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ test_list.extend(glob.glob("tests"))
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
- session.skip("SKIPPED: {} tests are disabled for this sample.".format(
- session.python
- ))
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
#
@@ -192,7 +242,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -201,6 +251,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
@@ -210,7 +265,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
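
One behavioral fix in this noxfile is easy to miss: the old `bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))` treated any non-empty string, including "False", as truthy, while the new membership test accepts only the spellings "True" and "true". A minimal sketch of the difference:

    import os

    os.environ["INSTALL_LIBRARY_FROM_SOURCE"] = "False"

    # Old check: bool() on any non-empty string is True, so "False"
    # still enabled installing from source.
    old = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))  # True

    # New check: compare against the accepted spellings explicitly.
    new = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ("True", "true")  # False

    print(old, new)  # True False
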
diff --git a/samples/snippets/reads/read_snippets.py b/samples/snippets/reads/read_snippets.py
index 6936b4c64..afd0955b8 100644
--- a/samples/snippets/reads/read_snippets.py
+++ b/samples/snippets/reads/read_snippets.py
@@ -43,7 +43,7 @@ def read_row_partial(project_id, instance_id, table_id):
table = instance.table(table_id)
row_key = "phone#4c410523#20190501"
- col_filter = row_filters.ColumnQualifierRegexFilter(b'os_build')
+ col_filter = row_filters.ColumnQualifierRegexFilter(b"os_build")
row = table.read_row(row_key, filter_=col_filter)
print_row(row)
@@ -74,8 +74,8 @@ def read_row_range(project_id, instance_id, table_id):
row_set = RowSet()
row_set.add_row_range_from_keys(
- start_key=b"phone#4c410523#20190501",
- end_key=b"phone#4c410523#201906201")
+ start_key=b"phone#4c410523#20190501", end_key=b"phone#4c410523#201906201"
+ )
rows = table.read_rows(row_set=row_set)
for row in rows:
@@ -91,11 +91,11 @@ def read_row_ranges(project_id, instance_id, table_id):
row_set = RowSet()
row_set.add_row_range_from_keys(
- start_key=b"phone#4c410523#20190501",
- end_key=b"phone#4c410523#201906201")
+ start_key=b"phone#4c410523#20190501", end_key=b"phone#4c410523#201906201"
+ )
row_set.add_row_range_from_keys(
- start_key=b"phone#5c10102#20190501",
- end_key=b"phone#5c10102#201906201")
+ start_key=b"phone#5c10102#20190501", end_key=b"phone#5c10102#201906201"
+ )
rows = table.read_rows(row_set=row_set)
for row in rows:
@@ -112,8 +112,7 @@ def read_prefix(project_id, instance_id, table_id):
end_key = prefix[:-1] + chr(ord(prefix[-1]) + 1)
row_set = RowSet()
- row_set.add_row_range_from_keys(prefix.encode("utf-8"),
- end_key.encode("utf-8"))
+ row_set.add_row_range_from_keys(prefix.encode("utf-8"), end_key.encode("utf-8"))
rows = table.read_rows(row_set=row_set)
for row in rows:
@@ -137,16 +136,23 @@ def read_filter(project_id, instance_id, table_id):
def print_row(row):
- print("Reading data for {}:".format(row.row_key.decode('utf-8')))
+ print("Reading data for {}:".format(row.row_key.decode("utf-8")))
for cf, cols in sorted(row.cells.items()):
print("Column Family {}".format(cf))
for col, cells in sorted(cols.items()):
for cell in cells:
- labels = " [{}]".format(",".join(cell.labels)) \
- if len(cell.labels) else ""
+ labels = (
+ " [{}]".format(",".join(cell.labels)) if len(cell.labels) else ""
+ )
print(
- "\t{}: {} @{}{}".format(col.decode('utf-8'),
- cell.value.decode('utf-8'),
- cell.timestamp, labels))
+ "\t{}: {} @{}{}".format(
+ col.decode("utf-8"),
+ cell.value.decode("utf-8"),
+ cell.timestamp,
+ labels,
+ )
+ )
print("")
+
+
# [END bigtable_reads_print]
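
The `read_prefix` snippet above computes an exclusive end key by incrementing the last character of the prefix, which bounds the scan to exactly the keys sharing that prefix. A quick sanity check of that arithmetic:

    prefix = "phone#"
    # "#" (0x23) increments to "$" (0x24), so the range is ["phone#", "phone$").
    end_key = prefix[:-1] + chr(ord(prefix[-1]) + 1)
    assert end_key == "phone$"
    # Every key starting with the prefix sorts inside the half-open range.
    assert prefix <= "phone#4c410523#20190501" < end_key
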
diff --git a/samples/snippets/reads/reads_test.py b/samples/snippets/reads/reads_test.py
index fc3421000..0b61e341f 100644
--- a/samples/snippets/reads/reads_test.py
+++ b/samples/snippets/reads/reads_test.py
@@ -21,9 +21,9 @@
import read_snippets
-PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
-BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE']
-TABLE_ID_PREFIX = 'mobile-time-series-{}'
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"]
+TABLE_ID_PREFIX = "mobile-time-series-{}"
@pytest.fixture(scope="module", autouse=True)
@@ -36,7 +36,7 @@ def table_id():
if table.exists():
table.delete()
- table.create(column_families={'stats_summary': None})
+ table.create(column_families={"stats_summary": None})
# table = instance.table(table_id)
diff --git a/samples/snippets/reads/requirements-test.txt b/samples/snippets/reads/requirements-test.txt
index 95ea1e6a0..4a46ff600 100644
--- a/samples/snippets/reads/requirements-test.txt
+++ b/samples/snippets/reads/requirements-test.txt
@@ -1 +1 @@
-pytest==6.2.4
+pytest==7.0.0
diff --git a/samples/snippets/reads/requirements.txt b/samples/snippets/reads/requirements.txt
index 83fd1d5e2..d2916abfc 100644
--- a/samples/snippets/reads/requirements.txt
+++ b/samples/snippets/reads/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.3.1
+google-cloud-bigtable==2.4.0
snapshottest==0.6.0
\ No newline at end of file
diff --git a/samples/snippets/writes/noxfile.py b/samples/snippets/writes/noxfile.py
index ba55d7ce5..20cdfc620 100644
--- a/samples/snippets/writes/noxfile.py
+++ b/samples/snippets/writes/noxfile.py
@@ -14,9 +14,11 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -27,8 +29,9 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
-# Copy `noxfile_config.py` to your directory and modify it instead.
+BLACK_VERSION = "black==19.10b0"
+# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
@@ -37,24 +40,29 @@
TEST_CONFIG = {
# You can opt out from the test for specific Python versions.
- 'ignored_versions': ["2.7"],
-
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ "enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
- 'envs': {},
+ "envs": {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
- sys.path.append('.')
+ sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
@@ -64,36 +72,43 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
- env_key = TEST_CONFIG['gcloud_project_env']
+ env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
- ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
- ret.update(TEST_CONFIG['envs'])
+ ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
-# All versions used to tested samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+# All versions used to test samples.
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
-INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
#
# Style Checks
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
This is used when running the linter to ensure that import order is
@@ -131,60 +146,95 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
- "."
+ ".",
]
session.run("flake8", *args)
#
-# Sample Tests
+# Black
#
-PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
-def _session_tests(session, post_install=None):
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
- if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+#
+# Sample Tests
+#
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
- if post_install:
- post_install(session)
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars()
- )
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ test_list.extend(glob.glob("tests"))
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
- session.skip("SKIPPED: {} tests are disabled for this sample.".format(
- session.python
- ))
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
#
@@ -192,7 +242,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -201,6 +251,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
@@ -210,7 +265,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
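
The `_get_repo_root` change adds `setup.py` as a second root marker because Cloud Build checkouts lack a `.git` directory. The walk itself is simple enough to sketch standalone (names here are illustrative, not part of the noxfile):

    import os
    from pathlib import Path
    from typing import Optional

    def find_repo_root(max_depth: int = 10) -> Optional[str]:
        p = Path(os.getcwd())
        for _ in range(max_depth):
            # .git covers normal clones; setup.py covers Cloud Build checkouts.
            if (p / ".git").exists() or (p / "setup.py").exists():
                return str(p)
            p = p.parent
        return None
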
diff --git a/samples/snippets/writes/requirements-test.txt b/samples/snippets/writes/requirements-test.txt
index 0db5cc446..27df4634c 100644
--- a/samples/snippets/writes/requirements-test.txt
+++ b/samples/snippets/writes/requirements-test.txt
@@ -1,2 +1,2 @@
backoff==1.11.1
-pytest==6.2.4
+pytest==7.0.0
diff --git a/samples/snippets/writes/requirements.txt b/samples/snippets/writes/requirements.txt
index f9a2edd68..2946eff51 100644
--- a/samples/snippets/writes/requirements.txt
+++ b/samples/snippets/writes/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.3.1
\ No newline at end of file
+google-cloud-bigtable==2.4.0
\ No newline at end of file
diff --git a/samples/snippets/writes/write_batch.py b/samples/snippets/writes/write_batch.py
index ecc8f273b..fd5117242 100644
--- a/samples/snippets/writes/write_batch.py
+++ b/samples/snippets/writes/write_batch.py
@@ -26,30 +26,22 @@ def write_batch(project_id, instance_id, table_id):
timestamp = datetime.datetime.utcnow()
column_family_id = "stats_summary"
- rows = [table.direct_row("tablet#a0b81f74#20190501"),
- table.direct_row("tablet#a0b81f74#20190502")]
-
- rows[0].set_cell(column_family_id,
- "connected_wifi",
- 1,
- timestamp)
- rows[0].set_cell(column_family_id,
- "os_build",
- "12155.0.0-rc1",
- timestamp)
- rows[1].set_cell(column_family_id,
- "connected_wifi",
- 1,
- timestamp)
- rows[1].set_cell(column_family_id,
- "os_build",
- "12145.0.0-rc6",
- timestamp)
+ rows = [
+ table.direct_row("tablet#a0b81f74#20190501"),
+ table.direct_row("tablet#a0b81f74#20190502"),
+ ]
+
+ rows[0].set_cell(column_family_id, "connected_wifi", 1, timestamp)
+ rows[0].set_cell(column_family_id, "os_build", "12155.0.0-rc1", timestamp)
+ rows[1].set_cell(column_family_id, "connected_wifi", 1, timestamp)
+ rows[1].set_cell(column_family_id, "os_build", "12145.0.0-rc6", timestamp)
response = table.mutate_rows(rows)
for i, status in enumerate(response):
if status.code != 0:
print("Error writing row: {}".format(status.message))
- print('Successfully wrote 2 rows.')
+ print("Successfully wrote 2 rows.")
+
+
# [END bigtable_writes_batch]
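
`table.mutate_rows` returns one status per row, index-aligned with the input list, which is what the loop above relies on. A small extension of that check that collects failures instead of printing them one by one (illustrative only):

    response = table.mutate_rows(rows)
    failed = [
        (i, status.message)
        for i, status in enumerate(response)
        if status.code != 0  # 0 is the gRPC OK code
    ]
    for index, message in failed:
        print("Row {} failed: {}".format(index, message))
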
diff --git a/samples/snippets/writes/write_conditionally.py b/samples/snippets/writes/write_conditionally.py
index 5f3d4d607..7fb640aad 100644
--- a/samples/snippets/writes/write_conditionally.py
+++ b/samples/snippets/writes/write_conditionally.py
@@ -30,15 +30,17 @@ def write_conditional(project_id, instance_id, table_id):
row_key = "phone#4c410523#20190501"
row_filter = row_filters.RowFilterChain(
- filters=[row_filters.FamilyNameRegexFilter(column_family_id),
- row_filters.ColumnQualifierRegexFilter('os_build'),
- row_filters.ValueRegexFilter("PQ2A\\..*")])
+ filters=[
+ row_filters.FamilyNameRegexFilter(column_family_id),
+ row_filters.ColumnQualifierRegexFilter("os_build"),
+ row_filters.ValueRegexFilter("PQ2A\\..*"),
+ ]
+ )
row = table.conditional_row(row_key, filter_=row_filter)
- row.set_cell(column_family_id,
- "os_name",
- "android",
- timestamp)
+ row.set_cell(column_family_id, "os_name", "android", timestamp)
row.commit()
- print('Successfully updated row\'s os_name.')
+ print("Successfully updated row's os_name.")
+
+
# [END bigtable_writes_conditional]
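
Worth noting for readers of this snippet: `ConditionalRow.set_cell` also accepts a `state` flag. With `state=True` (the default) the mutation applies only when the filter matches; `state=False` mutations apply when it does not. A hedged sketch using both branches (this sample itself only uses the default):

    row = table.conditional_row(row_key, filter_=row_filter)
    # Applied when the filter matches (state=True is the default).
    row.set_cell(column_family_id, "os_name", "android", timestamp, state=True)
    # Applied when the filter does NOT match.
    row.set_cell(column_family_id, "os_name", "unknown", timestamp, state=False)
    row.commit()
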
diff --git a/samples/snippets/writes/write_increment.py b/samples/snippets/writes/write_increment.py
index 73ce52c2f..ac8e2d16a 100644
--- a/samples/snippets/writes/write_increment.py
+++ b/samples/snippets/writes/write_increment.py
@@ -30,5 +30,7 @@ def write_increment(project_id, instance_id, table_id):
row.increment_cell_value(column_family_id, "connected_wifi", -1)
row.commit()
- print('Successfully updated row {}.'.format(row_key))
+ print("Successfully updated row {}.".format(row_key))
+
+
# [END bigtable_writes_increment]
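
`increment_cell_value` operates on the cell as a 64-bit big-endian signed integer, so reading the counter back means decoding eight bytes rather than parsing text. A hedged decoding sketch (row and column names taken from the sample):

    import struct

    row = table.read_row("phone#4c410523#20190501")
    cell = row.cells["stats_summary"][b"connected_wifi"][0]
    # Bigtable increments store the value as an 8-byte big-endian signed int.
    (count,) = struct.unpack(">q", cell.value)
    print(count)
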
diff --git a/samples/snippets/writes/write_simple.py b/samples/snippets/writes/write_simple.py
index b4222d234..1aa5a810f 100644
--- a/samples/snippets/writes/write_simple.py
+++ b/samples/snippets/writes/write_simple.py
@@ -30,20 +30,13 @@ def write_simple(project_id, instance_id, table_id):
row_key = "phone#4c410523#20190501"
row = table.direct_row(row_key)
- row.set_cell(column_family_id,
- "connected_cell",
- 1,
- timestamp)
- row.set_cell(column_family_id,
- "connected_wifi",
- 1,
- timestamp)
- row.set_cell(column_family_id,
- "os_build",
- "PQ2A.190405.003",
- timestamp)
+ row.set_cell(column_family_id, "connected_cell", 1, timestamp)
+ row.set_cell(column_family_id, "connected_wifi", 1, timestamp)
+ row.set_cell(column_family_id, "os_build", "PQ2A.190405.003", timestamp)
row.commit()
- print('Successfully wrote row {}.'.format(row_key))
+ print("Successfully wrote row {}.".format(row_key))
+
+
# [END bigtable_writes_simple]
diff --git a/samples/snippets/writes/writes_test.py b/samples/snippets/writes/writes_test.py
index abe300095..77ae883d6 100644
--- a/samples/snippets/writes/writes_test.py
+++ b/samples/snippets/writes/writes_test.py
@@ -26,9 +26,9 @@
from .write_simple import write_simple
-PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
-BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE']
-TABLE_ID_PREFIX = 'mobile-time-series-{}'
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"]
+TABLE_ID_PREFIX = "mobile-time-series-{}"
@pytest.fixture
@@ -48,7 +48,7 @@ def table_id(bigtable_instance):
if table.exists():
table.delete()
- column_family_id = 'stats_summary'
+ column_family_id = "stats_summary"
column_families = {column_family_id: None}
table.create(column_families=column_families)
@@ -67,7 +67,7 @@ def _write_simple():
_write_simple()
out, _ = capsys.readouterr()
- assert 'Successfully wrote row' in out
+ assert "Successfully wrote row" in out
@backoff.on_exception(backoff.expo, DeadlineExceeded, max_time=60)
def _write_increment():
@@ -75,7 +75,7 @@ def _write_increment():
_write_increment()
out, _ = capsys.readouterr()
- assert 'Successfully updated row' in out
+ assert "Successfully updated row" in out
@backoff.on_exception(backoff.expo, DeadlineExceeded, max_time=60)
def _write_conditional():
@@ -83,7 +83,7 @@ def _write_conditional():
_write_conditional()
out, _ = capsys.readouterr()
- assert 'Successfully updated row\'s os_name' in out
+ assert "Successfully updated row's os_name" in out
@backoff.on_exception(backoff.expo, DeadlineExceeded, max_time=60)
def _write_batch():
@@ -91,4 +91,4 @@ def _write_batch():
_write_batch()
out, _ = capsys.readouterr()
- assert 'Successfully wrote 2 rows' in out
+ assert "Successfully wrote 2 rows" in out
diff --git a/samples/tableadmin/noxfile.py b/samples/tableadmin/noxfile.py
index ba55d7ce5..20cdfc620 100644
--- a/samples/tableadmin/noxfile.py
+++ b/samples/tableadmin/noxfile.py
@@ -14,9 +14,11 @@
from __future__ import print_function
+import glob
import os
from pathlib import Path
import sys
+from typing import Callable, Dict, List, Optional
import nox
@@ -27,8 +29,9 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING
-# Copy `noxfile_config.py` to your directory and modify it instead.
+BLACK_VERSION = "black==19.10b0"
+# Copy `noxfile_config.py` to your directory and modify it instead.
# `TEST_CONFIG` dict is a configuration hook that allows users to
# modify the test configurations. The values here should be in sync
@@ -37,24 +40,29 @@
TEST_CONFIG = {
# You can opt out from the test for specific Python versions.
- 'ignored_versions': ["2.7"],
-
+ "ignored_versions": [],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ "enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
+ "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
-
+ # If you need to use a specific version of pip,
+ # change pip_version_override to the string representation
+ # of the version number, for example, "20.2.4"
+ "pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
- 'envs': {},
+ "envs": {},
}
try:
# Ensure we can import noxfile_config in the project's directory.
- sys.path.append('.')
+ sys.path.append(".")
from noxfile_config import TEST_CONFIG_OVERRIDE
except ImportError as e:
print("No user noxfile_config found: detail: {}".format(e))
@@ -64,36 +72,43 @@
TEST_CONFIG.update(TEST_CONFIG_OVERRIDE)
-def get_pytest_env_vars():
+def get_pytest_env_vars() -> Dict[str, str]:
"""Returns a dict for pytest invocation."""
ret = {}
# Override the GCLOUD_PROJECT and the alias.
- env_key = TEST_CONFIG['gcloud_project_env']
+ env_key = TEST_CONFIG["gcloud_project_env"]
# This should error out if not set.
- ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+ ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
# Apply user supplied envs.
- ret.update(TEST_CONFIG['envs'])
+ ret.update(TEST_CONFIG["envs"])
return ret
# DO NOT EDIT - automatically generated.
-# All versions used to tested samples.
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+# All versions used to test samples.
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
-INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in (
+ "True",
+ "true",
+)
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
#
# Style Checks
#
-def _determine_local_import_names(start_dir):
+def _determine_local_import_names(start_dir: str) -> List[str]:
"""Determines all import names that should be considered "local".
This is used when running the linter to ensure that import order is
@@ -131,60 +146,95 @@ def _determine_local_import_names(start_dir):
@nox.session
-def lint(session):
- session.install("flake8", "flake8-import-order")
+def lint(session: nox.sessions.Session) -> None:
+ if not TEST_CONFIG["enforce_type_hints"]:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
"--application-import-names",
",".join(local_names),
- "."
+ ".",
]
session.run("flake8", *args)
#
-# Sample Tests
+# Black
#
-PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+@nox.session
+def blacken(session: nox.sessions.Session) -> None:
+ session.install(BLACK_VERSION)
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+ session.run("black", *python_files)
-def _session_tests(session, post_install=None):
- """Runs py.test for a particular project."""
- if os.path.exists("requirements.txt"):
- session.install("-r", "requirements.txt")
- if os.path.exists("requirements-test.txt"):
- session.install("-r", "requirements-test.txt")
+#
+# Sample Tests
+#
- if INSTALL_LIBRARY_FROM_SOURCE:
- session.install("-e", _get_repo_root())
- if post_install:
- post_install(session)
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
- session.run(
- "pytest",
- *(PYTEST_COMMON_ARGS + session.posargs),
- # Pytest will return 5 when no tests are collected. This can happen
- # on travis where slow and flaky tests are excluded.
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
- success_codes=[0, 5],
- env=get_pytest_env_vars()
- )
+
+def _session_tests(
+ session: nox.sessions.Session, post_install: Callable = None
+) -> None:
+ # check for presence of tests
+ test_list = glob.glob("*_test.py") + glob.glob("test_*.py")
+ test_list.extend(glob.glob("tests"))
+ if len(test_list) == 0:
+ print("No tests found, skipping directory.")
+ else:
+ if TEST_CONFIG["pip_version_override"]:
+ pip_version = TEST_CONFIG["pip_version_override"]
+ session.install(f"pip=={pip_version}")
+ """Runs py.test for a particular project."""
+ if os.path.exists("requirements.txt"):
+ if os.path.exists("constraints.txt"):
+ session.install("-r", "requirements.txt", "-c", "constraints.txt")
+ else:
+ session.install("-r", "requirements.txt")
+
+ if os.path.exists("requirements-test.txt"):
+ if os.path.exists("constraints-test.txt"):
+ session.install(
+ "-r", "requirements-test.txt", "-c", "constraints-test.txt"
+ )
+ else:
+ session.install("-r", "requirements-test.txt")
+
+ if INSTALL_LIBRARY_FROM_SOURCE:
+ session.install("-e", _get_repo_root())
+
+ if post_install:
+ post_install(session)
+
+ session.run(
+ "pytest",
+ *(PYTEST_COMMON_ARGS + session.posargs),
+ # Pytest will return 5 when no tests are collected. This can happen
+ # on travis where slow and flaky tests are excluded.
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+ success_codes=[0, 5],
+ env=get_pytest_env_vars(),
+ )
@nox.session(python=ALL_VERSIONS)
-def py(session):
+def py(session: nox.sessions.Session) -> None:
"""Runs py.test for a sample using the specified version of Python."""
if session.python in TESTED_VERSIONS:
_session_tests(session)
else:
- session.skip("SKIPPED: {} tests are disabled for this sample.".format(
- session.python
- ))
+ session.skip(
+ "SKIPPED: {} tests are disabled for this sample.".format(session.python)
+ )
#
@@ -192,7 +242,7 @@ def py(session):
#
-def _get_repo_root():
+def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
@@ -201,6 +251,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
@@ -210,7 +265,7 @@ def _get_repo_root():
@nox.session
@nox.parametrize("path", GENERATED_READMES)
-def readmegen(session, path):
+def readmegen(session: nox.sessions.Session, path: str) -> None:
"""(Re-)generates the readme for a sample."""
session.install("jinja2", "pyyaml")
dir_ = os.path.dirname(path)
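
The guard added to `_session_tests` in each of these noxfiles skips directories with no tests instead of letting pytest exit with its "no tests collected" code. The discovery rule is just three globs; a standalone sketch:

    import glob

    def has_tests() -> bool:
        # Matches foo_test.py, test_foo.py, or a tests/ directory.
        candidates = glob.glob("*_test.py") + glob.glob("test_*.py") + glob.glob("tests")
        return len(candidates) > 0
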
diff --git a/samples/tableadmin/requirements-test.txt b/samples/tableadmin/requirements-test.txt
index 95ea1e6a0..f5889ff1d 100644
--- a/samples/tableadmin/requirements-test.txt
+++ b/samples/tableadmin/requirements-test.txt
@@ -1 +1,2 @@
-pytest==6.2.4
+pytest==7.0.0
+google-cloud-testutils==1.3.1
diff --git a/samples/tableadmin/requirements.txt b/samples/tableadmin/requirements.txt
index 5197d54ba..73d64741d 100644
--- a/samples/tableadmin/requirements.txt
+++ b/samples/tableadmin/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.3.1
+google-cloud-bigtable==2.4.0
diff --git a/samples/tableadmin/tableadmin.py b/samples/tableadmin/tableadmin.py
index 29551a7f3..7c28601fb 100644
--- a/samples/tableadmin/tableadmin.py
+++ b/samples/tableadmin/tableadmin.py
@@ -38,7 +38,7 @@
def create_table(project_id, instance_id, table_id):
- ''' Create a Bigtable table
+ """Create a Bigtable table
:type project_id: str
:param project_id: Project id of the client.
@@ -48,7 +48,7 @@ def create_table(project_id, instance_id, table_id):
:type table_id: str
:param table_id: Table id to create table.
- '''
+ """
client = bigtable.Client(project=project_id, admin=True)
instance = client.instance(instance_id)
@@ -56,19 +56,19 @@ def create_table(project_id, instance_id, table_id):
# Check whether table exists in an instance.
# Create table if it does not exist.
- print('Checking if table {} exists...'.format(table_id))
+ print("Checking if table {} exists...".format(table_id))
if table.exists():
- print('Table {} already exists.'.format(table_id))
+ print("Table {} already exists.".format(table_id))
else:
- print('Creating the {} table.'.format(table_id))
+ print("Creating the {} table.".format(table_id))
table.create()
- print('Created table {}.'.format(table_id))
+ print("Created table {}.".format(table_id))
return client, instance, table
def run_table_operations(project_id, instance_id, table_id):
- ''' Create a Bigtable table and perform basic operations on it
+ """Create a Bigtable table and perform basic operations on it
:type project_id: str
:param project_id: Project id of the client.
@@ -78,78 +78,84 @@ def run_table_operations(project_id, instance_id, table_id):
:type table_id: str
:param table_id: Table id to create table.
- '''
+ """
client, instance, table = create_table(project_id, instance_id, table_id)
# [START bigtable_list_tables]
tables = instance.list_tables()
- print('Listing tables in current project...')
+ print("Listing tables in current project...")
if tables != []:
for tbl in tables:
print(tbl.table_id)
else:
- print('No table exists in current project...')
+ print("No table exists in current project...")
# [END bigtable_list_tables]
# [START bigtable_create_family_gc_max_age]
- print('Creating column family cf1 with with MaxAge GC Rule...')
+ print("Creating column family cf1 with with MaxAge GC Rule...")
# Create a column family with GC policy : maximum age
# where age = current time minus cell timestamp
# Define the GC rule to retain data with max age of 5 days
max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5))
- column_family1 = table.column_family('cf1', max_age_rule)
+ column_family1 = table.column_family("cf1", max_age_rule)
column_family1.create()
- print('Created column family cf1 with MaxAge GC Rule.')
+ print("Created column family cf1 with MaxAge GC Rule.")
# [END bigtable_create_family_gc_max_age]
# [START bigtable_create_family_gc_max_versions]
- print('Creating column family cf2 with max versions GC rule...')
+ print("Creating column family cf2 with max versions GC rule...")
# Create a column family with GC policy : most recent N versions
# where 1 = most recent version
# Define the GC policy to retain only the most recent 2 versions
max_versions_rule = column_family.MaxVersionsGCRule(2)
- column_family2 = table.column_family('cf2', max_versions_rule)
+ column_family2 = table.column_family("cf2", max_versions_rule)
column_family2.create()
- print('Created column family cf2 with Max Versions GC Rule.')
+ print("Created column family cf2 with Max Versions GC Rule.")
# [END bigtable_create_family_gc_max_versions]
# [START bigtable_create_family_gc_union]
- print('Creating column family cf3 with union GC rule...')
+ print("Creating column family cf3 with union GC rule...")
# Create a column family with GC policy to drop data that matches
# at least one condition.
# Define a GC rule to drop cells older than 5 days or not the
# most recent version
- union_rule = column_family.GCRuleUnion([
- column_family.MaxAgeGCRule(datetime.timedelta(days=5)),
- column_family.MaxVersionsGCRule(2)])
-
- column_family3 = table.column_family('cf3', union_rule)
+ union_rule = column_family.GCRuleUnion(
+ [
+ column_family.MaxAgeGCRule(datetime.timedelta(days=5)),
+ column_family.MaxVersionsGCRule(2),
+ ]
+ )
+
+ column_family3 = table.column_family("cf3", union_rule)
column_family3.create()
- print('Created column family cf3 with Union GC rule')
+ print("Created column family cf3 with Union GC rule")
# [END bigtable_create_family_gc_union]
# [START bigtable_create_family_gc_intersection]
- print('Creating column family cf4 with Intersection GC rule...')
+ print("Creating column family cf4 with Intersection GC rule...")
# Create a column family with GC policy to drop data that matches
# all conditions
# GC rule: Drop cells older than 5 days AND older than the most
# recent 2 versions
- intersection_rule = column_family.GCRuleIntersection([
- column_family.MaxAgeGCRule(datetime.timedelta(days=5)),
- column_family.MaxVersionsGCRule(2)])
-
- column_family4 = table.column_family('cf4', intersection_rule)
+ intersection_rule = column_family.GCRuleIntersection(
+ [
+ column_family.MaxAgeGCRule(datetime.timedelta(days=5)),
+ column_family.MaxVersionsGCRule(2),
+ ]
+ )
+
+ column_family4 = table.column_family("cf4", intersection_rule)
column_family4.create()
- print('Created column family cf4 with Intersection GC rule.')
+ print("Created column family cf4 with Intersection GC rule.")
# [END bigtable_create_family_gc_intersection]
# [START bigtable_create_family_gc_nested]
- print('Creating column family cf5 with a Nested GC rule...')
+ print("Creating column family cf5 with a Nested GC rule...")
# Create a column family with nested GC policies.
# Create a nested GC rule:
# Drop cells that are either older than the 10 recent versions
@@ -157,23 +163,26 @@ def run_table_operations(project_id, instance_id, table_id):
# Drop cells that are older than a month AND older than the
# 2 recent versions
rule1 = column_family.MaxVersionsGCRule(10)
- rule2 = column_family.GCRuleIntersection([
- column_family.MaxAgeGCRule(datetime.timedelta(days=30)),
- column_family.MaxVersionsGCRule(2)])
+ rule2 = column_family.GCRuleIntersection(
+ [
+ column_family.MaxAgeGCRule(datetime.timedelta(days=30)),
+ column_family.MaxVersionsGCRule(2),
+ ]
+ )
nested_rule = column_family.GCRuleUnion([rule1, rule2])
- column_family5 = table.column_family('cf5', nested_rule)
+ column_family5 = table.column_family("cf5", nested_rule)
column_family5.create()
- print('Created column family cf5 with a Nested GC rule.')
+ print("Created column family cf5 with a Nested GC rule.")
# [END bigtable_create_family_gc_nested]
# [START bigtable_list_column_families]
- print('Printing Column Family and GC Rule for all column families...')
+ print("Printing Column Family and GC Rule for all column families...")
column_families = table.list_column_families()
for column_family_name, gc_rule in sorted(column_families.items()):
- print('Column Family:', column_family_name)
- print('GC Rule:')
+ print("Column Family:", column_family_name)
+ print("GC Rule:")
print(gc_rule.to_pb())
# Sample output:
# Column Family: cf4
@@ -192,37 +201,37 @@ def run_table_operations(project_id, instance_id, table_id):
# }
# [END bigtable_list_column_families]
- print('Print column family cf1 GC rule before update...')
- print('Column Family: cf1')
+ print("Print column family cf1 GC rule before update...")
+ print("Column Family: cf1")
print(column_family1.to_pb())
# [START bigtable_update_gc_rule]
- print('Updating column family cf1 GC rule...')
+ print("Updating column family cf1 GC rule...")
# Update the column family cf1 to update the GC rule
- column_family1 = table.column_family(
- 'cf1',
- column_family.MaxVersionsGCRule(1))
+ column_family1 = table.column_family("cf1", column_family.MaxVersionsGCRule(1))
column_family1.update()
- print('Updated column family cf1 GC rule\n')
+ print("Updated column family cf1 GC rule\n")
# [END bigtable_update_gc_rule]
- print('Print column family cf1 GC rule after update...')
- print('Column Family: cf1')
+ print("Print column family cf1 GC rule after update...")
+ print("Column Family: cf1")
print(column_family1.to_pb())
# [START bigtable_delete_family]
- print('Delete a column family cf2...')
+ print("Delete a column family cf2...")
# Delete a column family
column_family2.delete()
- print('Column family cf2 deleted successfully.')
+ print("Column family cf2 deleted successfully.")
# [END bigtable_delete_family]
- print('execute command "python tableadmin.py delete [project_id] \
- [instance_id] --table [tableName]" to delete the table.')
+ print(
+ 'execute command "python tableadmin.py delete [project_id] \
+ [instance_id] --table [tableName]" to delete the table.'
+ )
def delete_table(project_id, instance_id, table_id):
- ''' Delete bigtable.
+ """Delete bigtable.
:type project_id: str
:param project_id: Project id of the client.
@@ -232,7 +241,7 @@ def delete_table(project_id, instance_id, table_id):
:type table_id: str
:param table_id: Table id to create table.
- '''
+ """
client = bigtable.Client(project=project_id, admin=True)
instance = client.instance(instance_id)
@@ -241,43 +250,44 @@ def delete_table(project_id, instance_id, table_id):
# [START bigtable_delete_table]
# Delete the entire table
- print('Checking if table {} exists...'.format(table_id))
+ print("Checking if table {} exists...".format(table_id))
if table.exists():
- print('Table {} exists.'.format(table_id))
- print('Deleting {} table.'.format(table_id))
+ print("Table {} exists.".format(table_id))
+ print("Deleting {} table.".format(table_id))
table.delete()
- print('Deleted {} table.'.format(table_id))
+ print("Deleted {} table.".format(table_id))
else:
- print('Table {} does not exists.'.format(table_id))
+ print("Table {} does not exists.".format(table_id))
# [END bigtable_delete_table]
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = argparse.ArgumentParser(
- description=__doc__,
- formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
- parser.add_argument('command',
- help='run or delete. \
- Operation to perform on table.')
parser.add_argument(
- '--table',
- help='Cloud Bigtable Table name.',
- default='Hello-Bigtable')
+ "command",
+ help="run or delete. \
+ Operation to perform on table.",
+ )
+ parser.add_argument(
+ "--table", help="Cloud Bigtable Table name.", default="Hello-Bigtable"
+ )
- parser.add_argument('project_id',
- help='Your Cloud Platform project ID.')
+ parser.add_argument("project_id", help="Your Cloud Platform project ID.")
parser.add_argument(
- 'instance_id',
- help='ID of the Cloud Bigtable instance to connect to.')
+ "instance_id", help="ID of the Cloud Bigtable instance to connect to."
+ )
args = parser.parse_args()
- if args.command.lower() == 'run':
- run_table_operations(args.project_id, args.instance_id,
- args.table)
- elif args.command.lower() == 'delete':
+ if args.command.lower() == "run":
+ run_table_operations(args.project_id, args.instance_id, args.table)
+ elif args.command.lower() == "delete":
delete_table(args.project_id, args.instance_id, args.table)
else:
- print('Command should be either run or delete.\n Use argument -h,\
- --help to show help and exit.')
+ print(
+ "Command should be either run or delete.\n Use argument -h,\
+ --help to show help and exit."
+ )
diff --git a/samples/tableadmin/tableadmin_test.py b/samples/tableadmin/tableadmin_test.py
index c0ef09d12..3063eee9f 100755
--- a/samples/tableadmin/tableadmin_test.py
+++ b/samples/tableadmin/tableadmin_test.py
@@ -16,48 +16,53 @@
import os
import uuid
+from google.api_core import exceptions
+from test_utils.retry import RetryErrors
+
from tableadmin import create_table
from tableadmin import delete_table
from tableadmin import run_table_operations
-PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
-BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE']
-TABLE_ID_FORMAT = 'tableadmin-test-{}'
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"]
+TABLE_ID_FORMAT = "tableadmin-test-{}"
+
+retry_429_503 = RetryErrors(exceptions.TooManyRequests, exceptions.ServiceUnavailable)
def test_run_table_operations(capsys):
table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8])
- run_table_operations(PROJECT, BIGTABLE_INSTANCE, table_id)
+ retry_429_503(run_table_operations)(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
- assert 'Creating the ' + table_id + ' table.' in out
- assert 'Listing tables in current project.' in out
- assert 'Creating column family cf1 with with MaxAge GC Rule' in out
- assert 'Created column family cf1 with MaxAge GC Rule.' in out
- assert 'Created column family cf2 with Max Versions GC Rule.' in out
- assert 'Created column family cf3 with Union GC rule' in out
- assert 'Created column family cf4 with Intersection GC rule.' in out
- assert 'Created column family cf5 with a Nested GC rule.' in out
- assert 'Printing Column Family and GC Rule for all column families.' in out
- assert 'Updating column family cf1 GC rule...' in out
- assert 'Updated column family cf1 GC rule' in out
- assert 'Print column family cf1 GC rule after update...' in out
- assert 'Column Family: cf1' in out
- assert 'max_num_versions: 1' in out
- assert 'Delete a column family cf2...' in out
- assert 'Column family cf2 deleted successfully.' in out
-
- delete_table(PROJECT, BIGTABLE_INSTANCE, table_id)
+ assert "Creating the " + table_id + " table." in out
+ assert "Listing tables in current project." in out
+ assert "Creating column family cf1 with with MaxAge GC Rule" in out
+ assert "Created column family cf1 with MaxAge GC Rule." in out
+ assert "Created column family cf2 with Max Versions GC Rule." in out
+ assert "Created column family cf3 with Union GC rule" in out
+ assert "Created column family cf4 with Intersection GC rule." in out
+ assert "Created column family cf5 with a Nested GC rule." in out
+ assert "Printing Column Family and GC Rule for all column families." in out
+ assert "Updating column family cf1 GC rule..." in out
+ assert "Updated column family cf1 GC rule" in out
+ assert "Print column family cf1 GC rule after update..." in out
+ assert "Column Family: cf1" in out
+ assert "max_num_versions: 1" in out
+ assert "Delete a column family cf2..." in out
+ assert "Column family cf2 deleted successfully." in out
+
+ retry_429_503(delete_table)(PROJECT, BIGTABLE_INSTANCE, table_id)
def test_delete_table(capsys):
table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8])
- create_table(PROJECT, BIGTABLE_INSTANCE, table_id)
+ retry_429_503(create_table)(PROJECT, BIGTABLE_INSTANCE, table_id)
- delete_table(PROJECT, BIGTABLE_INSTANCE, table_id)
+ retry_429_503(delete_table)(PROJECT, BIGTABLE_INSTANCE, table_id)
out, _ = capsys.readouterr()
- assert 'Table ' + table_id + ' exists.' in out
- assert 'Deleting ' + table_id + ' table.' in out
- assert 'Deleted ' + table_id + ' table.' in out
+ assert "Table " + table_id + " exists." in out
+ assert "Deleting " + table_id + " table." in out
+ assert "Deleted " + table_id + " table." in out
diff --git a/scripts/fixup_bigtable_admin_v2_keywords.py b/scripts/fixup_bigtable_admin_v2_keywords.py
index c8e998f88..a837ad292 100644
--- a/scripts/fixup_bigtable_admin_v2_keywords.py
+++ b/scripts/fixup_bigtable_admin_v2_keywords.py
@@ -68,6 +68,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer):
'list_snapshots': ('parent', 'page_size', 'page_token', ),
'list_tables': ('parent', 'view', 'page_size', 'page_token', ),
'modify_column_families': ('name', 'modifications', ),
+ 'partial_update_cluster': ('cluster', 'update_mask', ),
'partial_update_instance': ('instance', 'update_mask', ),
'restore_table': ('parent', 'table_id', 'backup', ),
'set_iam_policy': ('resource', 'policy', ),
@@ -75,8 +76,8 @@ class bigtable_adminCallTransformer(cst.CSTTransformer):
'test_iam_permissions': ('resource', 'permissions', ),
'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ),
'update_backup': ('backup', 'update_mask', ),
- 'update_cluster': ('serve_nodes', 'name', 'location', 'state', 'default_storage_type', 'encryption_config', ),
- 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', ),
+ 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'cluster_config', 'default_storage_type', 'encryption_config', ),
+ 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
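
For context, this fixup script rewrites positional calls on the generated v2 surface into keyword-argument calls using the table above, so the new `partial_update_cluster` entry normalizes calls like the following (the argument names come from the mapping; the call itself is illustrative):

    # Before running fixup_bigtable_admin_v2_keywords.py:
    client.partial_update_cluster(cluster, update_mask)

    # After: positional arguments become keywords, per the mapping table.
    client.partial_update_cluster(cluster=cluster, update_mask=update_mask)
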
diff --git a/setup.py b/setup.py
index 71e055017..ac58e62e7 100644
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
name = "google-cloud-bigtable"
description = "Google Cloud Bigtable API client library"
-version = "2.4.0"
+version = "2.5.0"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
@@ -32,7 +32,7 @@
# NOTE: Maintainers, please do not require google-api-core>=2.x.x
# Until this issue is closed
# https://github.com/googleapis/google-cloud-python/issues/10566
- "google-api-core[grpc] >= 1.26.0, <3.0.0dev",
+ "google-api-core[grpc] >= 1.28.0, <3.0.0dev",
# NOTE: Maintainers, please do not require google-api-core>=2.x.x
# Until this issue is closed
# https://github.com/googleapis/google-cloud-python/issues/10566
@@ -40,7 +40,6 @@
"grpc-google-iam-v1 >= 0.12.3, < 0.13dev",
"proto-plus >= 1.13.0",
"libcst >= 0.2.5",
- "packaging >= 14.3",
]
extras = {}
@@ -83,6 +82,9 @@
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Topic :: Internet",
],
diff --git a/testing/constraints-3.6.txt b/testing/constraints-3.6.txt
index 25d8d3eef..1e50717bf 100644
--- a/testing/constraints-3.6.txt
+++ b/testing/constraints-3.6.txt
@@ -5,10 +5,8 @@
#
# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev",
# Then this file should have foo==1.14.0
-google-api-core==1.26.0
+google-api-core==1.28.0
google-cloud-core==1.4.1
grpc-google-iam-v1==0.12.3
proto-plus==1.13.0
libcst==0.2.5
-packaging==14.3
-google-auth==1.24.0 # TODO: remove when google-auth >= 1.25.0 is required transitively through google-api-core
diff --git a/tests/system/_helpers.py b/tests/system/_helpers.py
index f6895a51f..ab4b54b05 100644
--- a/tests/system/_helpers.py
+++ b/tests/system/_helpers.py
@@ -33,7 +33,7 @@ def _retry_on_unavailable(exc):
retry_grpc_unavailable = retry.RetryErrors(
- core_exceptions.GrpcRendezvous, error_predicate=_retry_on_unavailable,
+ core_exceptions.GrpcRendezvous, error_predicate=_retry_on_unavailable, max_tries=9,
)
diff --git a/tests/system/conftest.py b/tests/system/conftest.py
index 778cf8c94..6f6cdc2d1 100644
--- a/tests/system/conftest.py
+++ b/tests/system/conftest.py
@@ -41,7 +41,7 @@ def with_kms_key_name(kms_key_name):
@pytest.fixture(scope="session")
-def not_in_emulator(in_emulator):
+def skip_on_emulator(in_emulator):
if in_emulator:
pytest.skip("Emulator does not support this feature")
@@ -146,10 +146,10 @@ def data_instance_populated(
serve_nodes,
in_emulator,
):
+ instance = admin_client.instance(data_instance_id, labels=instance_labels)
# Emulator does not support instance admin operations (create / delete).
# See: https://cloud.google.com/bigtable/docs/emulator
if not in_emulator:
- instance = admin_client.instance(data_instance_id, labels=instance_labels)
cluster = instance.cluster(
data_cluster_id, location_id=location_id, serve_nodes=serve_nodes,
)
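
The rename from `not_in_emulator` to `skip_on_emulator` better describes what requesting the fixture does: `pytest.skip` raised inside a fixture skips every test that depends on it. A minimal sketch of the pattern (the `in_emulator` body here is illustrative; the Bigtable client honors `BIGTABLE_EMULATOR_HOST`):

    import os
    import pytest

    @pytest.fixture(scope="session")
    def in_emulator():
        return os.getenv("BIGTABLE_EMULATOR_HOST") is not None

    @pytest.fixture(scope="session")
    def skip_on_emulator(in_emulator):
        if in_emulator:
            pytest.skip("Emulator does not support this feature")
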
diff --git a/tests/system/test_data_api.py b/tests/system/test_data_api.py
index 2137aa2e4..2ca7e1504 100644
--- a/tests/system/test_data_api.py
+++ b/tests/system/test_data_api.py
@@ -210,7 +210,7 @@ def test_rowset_add_row_range_w_pfx(data_table, rows_to_delete):
assert found_row_keys == expected_row_keys
-def test_table_read_row_large_cell(data_table, rows_to_delete, not_in_emulator):
+def test_table_read_row_large_cell(data_table, rows_to_delete, skip_on_emulator):
# Maximum gRPC received message size for emulator is 4194304 bytes.
row = data_table.direct_row(ROW_KEY)
rows_to_delete.append(row)
@@ -325,7 +325,7 @@ def test_table_read_rows(data_table, rows_to_delete):
assert rows_data.rows == expected_rows
-def test_read_with_label_applied(data_table, rows_to_delete, not_in_emulator):
+def test_read_with_label_applied(data_table, rows_to_delete, skip_on_emulator):
from google.cloud.bigtable.row_filters import ApplyLabelFilter
from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter
from google.cloud.bigtable.row_filters import RowFilterChain
diff --git a/tests/system/test_instance_admin.py b/tests/system/test_instance_admin.py
index c5f7b525e..c2cf21291 100644
--- a/tests/system/test_instance_admin.py
+++ b/tests/system/test_instance_admin.py
@@ -79,7 +79,7 @@ def _modify_app_profile_helper(
)
operation = app_profile.update(ignore_warnings=ignore_warnings)
- operation.result(timeout=30)
+ operation.result(timeout=60)
alt_profile = instance.app_profile(app_profile_id)
alt_profile.reload()
@@ -96,7 +96,9 @@ def _delete_app_profile_helper(app_profile):
assert not app_profile.exists()
-def test_client_list_instances(admin_client, admin_instance_populated, not_in_emulator):
+def test_client_list_instances(
+ admin_client, admin_instance_populated, skip_on_emulator
+):
instances, failed_locations = admin_client.list_instances()
assert failed_locations == []
@@ -105,20 +107,20 @@ def test_client_list_instances(admin_client, admin_instance_populated, not_in_em
assert admin_instance_populated.name in found
-def test_instance_exists_hit(admin_instance_populated):
+def test_instance_exists_hit(admin_instance_populated, skip_on_emulator):
# Emulator does not support instance admin operations (create / delete).
# It allows connecting with *any* project / instance name.
# See: https://cloud.google.com/bigtable/docs/emulator
assert admin_instance_populated.exists()
-def test_instance_exists_miss(admin_client):
+def test_instance_exists_miss(admin_client, skip_on_emulator):
alt_instance = admin_client.instance("nonesuch-instance")
assert not alt_instance.exists()
def test_instance_reload(
- admin_client, admin_instance_id, admin_instance_populated, not_in_emulator
+ admin_client, admin_instance_id, admin_instance_populated, skip_on_emulator
):
# Use same arguments as 'admin_instance_populated'
# so we can use reload() on a fresh instance.
@@ -139,7 +141,7 @@ def test_instance_create_prod(
location_id,
instance_labels,
instances_to_delete,
- not_in_emulator,
+ skip_on_emulator,
):
from google.cloud.bigtable import enums
@@ -153,7 +155,7 @@ def test_instance_create_prod(
operation = instance.create(clusters=[cluster])
instances_to_delete.append(instance)
- operation.result(timeout=30) # Ensure the operation completes.
+ operation.result(timeout=60) # Ensure the operation completes.
assert instance.type_ is None
# Create a new instance object and make sure it is the same.
@@ -171,7 +173,7 @@ def test_instance_create_development(
location_id,
instance_labels,
instances_to_delete,
- not_in_emulator,
+ skip_on_emulator,
):
alt_instance_id = f"new{unique_suffix}"
instance = admin_client.instance(
@@ -184,7 +186,7 @@ def test_instance_create_development(
operation = instance.create(clusters=[cluster])
instances_to_delete.append(instance)
- operation.result(timeout=30) # Ensure the operation completes.
+ operation.result(timeout=60) # Ensure the operation completes.
# Create a new instance object and make sure it is the same.
instance_alt = admin_client.instance(alt_instance_id)
@@ -205,7 +207,7 @@ def test_instance_create_w_two_clusters(
location_id,
instance_labels,
instances_to_delete,
- not_in_emulator,
+ skip_on_emulator,
):
alt_instance_id = f"dif{unique_suffix}"
instance = admin_client.instance(
@@ -400,7 +402,7 @@ def test_instance_create_w_two_clusters_cmek(
instance_labels,
instances_to_delete,
with_kms_key_name,
- not_in_emulator,
+ skip_on_emulator,
):
alt_instance_id = f"dif-cmek{unique_suffix}"
instance = admin_client.instance(
@@ -484,7 +486,7 @@ def test_instance_update_display_name_and_labels(
admin_instance_populated,
label_key,
instance_labels,
- not_in_emulator,
+ skip_on_emulator,
):
old_display_name = admin_instance_populated.display_name
new_display_name = "Foo Bar Baz"
@@ -494,7 +496,7 @@ def test_instance_update_display_name_and_labels(
admin_instance_populated.labels = new_labels
operation = admin_instance_populated.update()
- operation.result(timeout=30) # ensure the operation completes.
+ operation.result(timeout=60) # ensure the operation completes.
# Create a new instance object and reload it.
instance_alt = admin_client.instance(admin_instance_id, labels={})
@@ -511,7 +513,7 @@ def test_instance_update_display_name_and_labels(
admin_instance_populated.display_name = old_display_name
admin_instance_populated.labels = instance_labels
operation = admin_instance_populated.update()
- operation.result(timeout=30) # ensure the operation completes.
+ operation.result(timeout=60) # ensure the operation completes.
def test_instance_update_w_type(
@@ -521,7 +523,7 @@ def test_instance_update_w_type(
location_id,
instance_labels,
instances_to_delete,
- not_in_emulator,
+ skip_on_emulator,
):
alt_instance_id = f"ndif{unique_suffix}"
instance = admin_client.instance(
@@ -534,12 +536,12 @@ def test_instance_update_w_type(
operation = instance.create(clusters=[cluster])
instances_to_delete.append(instance)
- operation.result(timeout=30) # Ensure the operation completes.
+ operation.result(timeout=60) # Ensure the operation completes.
instance.display_name = None
instance.type_ = enums.Instance.Type.PRODUCTION
operation = instance.update()
- operation.result(timeout=30) # ensure the operation completes.
+ operation.result(timeout=60) # ensure the operation completes.
# Create a new instance object and reload it.
instance_alt = admin_client.instance(alt_instance_id)
@@ -548,17 +550,17 @@ def test_instance_update_w_type(
assert instance_alt.type_ == enums.Instance.Type.PRODUCTION
-def test_cluster_exists_hit(admin_cluster, not_in_emulator):
+def test_cluster_exists_hit(admin_cluster, skip_on_emulator):
assert admin_cluster.exists()
-def test_cluster_exists_miss(admin_instance_populated, not_in_emulator):
+def test_cluster_exists_miss(admin_instance_populated, skip_on_emulator):
alt_cluster = admin_instance_populated.cluster("nonesuch-cluster")
assert not alt_cluster.exists()
def test_cluster_create(
- admin_instance_populated, admin_instance_id,
+ admin_instance_populated, admin_instance_id, skip_on_emulator,
):
alt_cluster_id = f"{admin_instance_id}-c2"
alt_location_id = "us-central1-f"
@@ -571,7 +573,7 @@ def test_cluster_create(
default_storage_type=(enums.StorageType.SSD),
)
operation = cluster_2.create()
- operation.result(timeout=30) # Ensure the operation completes.
+ operation.result(timeout=60) # Ensure the operation completes.
# Create a new object instance, reload and make sure it is the same.
alt_cluster = admin_instance_populated.cluster(alt_cluster_id)
@@ -594,14 +596,14 @@ def test_cluster_update(
admin_cluster_id,
admin_cluster,
serve_nodes,
- not_in_emulator,
+ skip_on_emulator,
):
new_serve_nodes = 4
admin_cluster.serve_nodes = new_serve_nodes
operation = admin_cluster.update()
- operation.result(timeout=30) # Ensure the operation completes.
+ operation.result(timeout=60) # Ensure the operation completes.
# Create a new cluster instance and reload it.
alt_cluster = admin_instance_populated.cluster(admin_cluster_id)
@@ -611,4 +613,4 @@ def test_cluster_update(
# Put the cluster back the way it was for the other test cases.
admin_cluster.serve_nodes = serve_nodes
operation = admin_cluster.update()
- operation.result(timeout=30) # Ensure the operation completes.
+ operation.result(timeout=60) # Ensure the operation completes.
diff --git a/tests/system/test_table_admin.py b/tests/system/test_table_admin.py
index 232c6d0fc..1ed540d63 100644
--- a/tests/system/test_table_admin.py
+++ b/tests/system/test_table_admin.py
@@ -57,7 +57,7 @@ def backups_to_delete():
backup.delete()
-def test_instance_list_tables(data_instance_populated, shared_table):
+def test_instance_list_tables(data_instance_populated, shared_table, skip_on_emulator):
# Since `data_instance_populated` is newly created, the
# table created in `shared_table` here will be the only one.
tables = data_instance_populated.list_tables()
@@ -115,7 +115,7 @@ def test_table_create_w_families(
def test_table_create_w_split_keys(
- data_instance_populated, tables_to_delete, not_in_emulator,
+ data_instance_populated, tables_to_delete, skip_on_emulator
):
temp_table_id = "foo-bar-baz-split-table"
initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"]
@@ -203,7 +203,7 @@ def test_column_family_delete(data_instance_populated, tables_to_delete):
def test_table_get_iam_policy(
- data_instance_populated, tables_to_delete, not_in_emulator,
+ data_instance_populated, tables_to_delete, skip_on_emulator
):
temp_table_id = "test-get-iam-policy-table"
temp_table = data_instance_populated.table(temp_table_id)
@@ -216,7 +216,7 @@ def test_table_get_iam_policy(
def test_table_set_iam_policy(
- service_account, data_instance_populated, tables_to_delete, not_in_emulator,
+ service_account, data_instance_populated, tables_to_delete, skip_on_emulator
):
from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
from google.cloud.bigtable.policy import Policy
@@ -236,7 +236,7 @@ def test_table_set_iam_policy(
def test_table_test_iam_permissions(
- data_instance_populated, tables_to_delete, not_in_emulator,
+ data_instance_populated, tables_to_delete, skip_on_emulator,
):
temp_table_id = "test-test-iam-policy-table"
temp_table = data_instance_populated.table(temp_table_id)
@@ -258,7 +258,7 @@ def test_table_backup(
instances_to_delete,
tables_to_delete,
backups_to_delete,
- not_in_emulator,
+ skip_on_emulator,
):
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.bigtable import enums
diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
index 029ed196f..bf5b3e9e5 100644
--- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
+++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
@@ -15,7 +15,6 @@
#
import os
import mock
-import packaging.version
import grpc
from grpc.experimental import aio
@@ -30,8 +29,10 @@
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
+from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
+from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
@@ -42,9 +43,6 @@
)
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports
-from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.base import (
- _GOOGLE_AUTH_VERSION,
-)
from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin
from google.cloud.bigtable_admin_v2.types import common
from google.cloud.bigtable_admin_v2.types import instance
@@ -55,24 +53,11 @@
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
from google.type import expr_pb2 # type: ignore
import google.auth
-# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
-# through google-api-core:
-# - Delete the auth "less than" test cases
-# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
- packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
- reason="This test requires google-auth < 1.25.0",
-)
-requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
- packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
- reason="This test requires google-auth >= 1.25.0",
-)
-
-
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
@@ -234,7 +219,7 @@ def test_bigtable_instance_admin_client_client_options(
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -251,7 +236,7 @@ def test_bigtable_instance_admin_client_client_options(
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -268,7 +253,7 @@ def test_bigtable_instance_admin_client_client_options(
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -284,20 +269,20 @@ def test_bigtable_instance_admin_client_client_options(
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -366,7 +351,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto(
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
@@ -408,7 +393,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto(
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -430,7 +415,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto(
return_value=False,
):
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -443,6 +428,87 @@ def test_bigtable_instance_admin_client_mtls_env_auto(
)
+@pytest.mark.parametrize(
+ "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient]
+)
+@mock.patch.object(
+ BigtableInstanceAdminClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(BigtableInstanceAdminClient),
+)
+@mock.patch.object(
+ BigtableInstanceAdminAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(BigtableInstanceAdminAsyncClient),
+)
+def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client_class):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+
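# Summarizing what the new test above pins down: the classmethod resolves the
# endpoint and client cert source from two environment variables. With
# GOOGLE_API_USE_MTLS_ENDPOINT=never the regular endpoint is returned; with
# "always", the mTLS endpoint; with the default "auto", the mTLS endpoint is
# chosen only when GOOGLE_API_USE_CLIENT_CERTIFICATE=true and a default
# client cert source exists. Explicit client_options win in every case. A
# hedged usage sketch:
from google.api_core import client_options as client_options_lib
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)

api_endpoint, cert_source = BigtableInstanceAdminClient.get_mtls_endpoint_and_cert_source(
    client_options_lib.ClientOptions()
)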
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
@@ -465,7 +531,7 @@ def test_bigtable_instance_admin_client_client_options_scopes(
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -479,28 +545,31 @@ def test_bigtable_instance_admin_client_client_options_scopes(
@pytest.mark.parametrize(
- "client_class,transport_class,transport_name",
+ "client_class,transport_class,transport_name,grpc_helpers",
[
(
BigtableInstanceAdminClient,
transports.BigtableInstanceAdminGrpcTransport,
"grpc",
+ grpc_helpers,
),
(
BigtableInstanceAdminAsyncClient,
transports.BigtableInstanceAdminGrpcAsyncIOTransport,
"grpc_asyncio",
+ grpc_helpers_async,
),
],
)
def test_bigtable_instance_admin_client_client_options_credentials_file(
- client_class, transport_class, transport_name
+ client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
+
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
@@ -533,9 +602,84 @@ def test_bigtable_instance_admin_client_client_options_from_dict():
)
-def test_create_instance(
- transport: str = "grpc", request_type=bigtable_instance_admin.CreateInstanceRequest
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ BigtableInstanceAdminClient,
+ transports.BigtableInstanceAdminGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ BigtableInstanceAdminAsyncClient,
+ transports.BigtableInstanceAdminGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_bigtable_instance_admin_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "bigtableadmin.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=(
+ "https://www.googleapis.com/auth/bigtable.admin",
+ "https://www.googleapis.com/auth/bigtable.admin.cluster",
+ "https://www.googleapis.com/auth/bigtable.admin.instance",
+ "https://www.googleapis.com/auth/cloud-bigtable.admin",
+ "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ scopes=None,
+ default_host="bigtableadmin.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.CreateInstanceRequest, dict,]
+)
+def test_create_instance(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -559,10 +703,6 @@ def test_create_instance(
assert isinstance(response, future.Future)
-def test_create_instance_from_dict():
- test_create_instance(request_type=dict)
-
-
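# Pattern note for this and the many parallel hunks below: each standalone
# `test_*_from_dict` wrapper is folded into a `request_type` parametrization,
# because GAPIC methods accept either the typed request message or a plain
# dict of the same shape. A minimal illustration of that contract:
import pytest

from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin


@pytest.mark.parametrize(
    "request_type", [bigtable_instance_admin.CreateInstanceRequest, dict]
)
def test_request_type_contract(request_type):
    # Calling the parameter with no arguments yields an empty request in both
    # cases, which is exactly what the refactored tests send to the client.
    request = request_type()
    assert request is not None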
def test_create_instance_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -690,12 +830,18 @@ def test_create_instance_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].instance_id == "instance_id_value"
- assert args[0].instance == gba_instance.Instance(name="name_value")
- assert args[0].clusters == {
- "key_value": gba_instance.Cluster(name="name_value")
- }
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].instance_id
+ mock_val = "instance_id_value"
+ assert arg == mock_val
+ arg = args[0].instance
+ mock_val = gba_instance.Instance(name="name_value")
+ assert arg == mock_val
+ arg = args[0].clusters
+ mock_val = {"key_value": gba_instance.Cluster(name="name_value")}
+ assert arg == mock_val
def test_create_instance_flattened_error():
@@ -742,12 +888,18 @@ async def test_create_instance_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].instance_id == "instance_id_value"
- assert args[0].instance == gba_instance.Instance(name="name_value")
- assert args[0].clusters == {
- "key_value": gba_instance.Cluster(name="name_value")
- }
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].instance_id
+ mock_val = "instance_id_value"
+ assert arg == mock_val
+ arg = args[0].instance
+ mock_val = gba_instance.Instance(name="name_value")
+ assert arg == mock_val
+ arg = args[0].clusters
+ mock_val = {"key_value": gba_instance.Cluster(name="name_value")}
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -768,9 +920,10 @@ async def test_create_instance_flattened_error_async():
)
-def test_get_instance(
- transport: str = "grpc", request_type=bigtable_instance_admin.GetInstanceRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.GetInstanceRequest, dict,]
+)
+def test_get_instance(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -803,10 +956,6 @@ def test_get_instance(
assert response.type_ == instance.Instance.Type.PRODUCTION
-def test_get_instance_from_dict():
- test_get_instance(request_type=dict)
-
-
def test_get_instance_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -936,7 +1085,9 @@ def test_get_instance_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_instance_flattened_error():
@@ -972,7 +1123,9 @@ async def test_get_instance_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -989,9 +1142,10 @@ async def test_get_instance_flattened_error_async():
)
-def test_list_instances(
- transport: str = "grpc", request_type=bigtable_instance_admin.ListInstancesRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.ListInstancesRequest, dict,]
+)
+def test_list_instances(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1021,10 +1175,6 @@ def test_list_instances(
assert response.next_page_token == "next_page_token_value"
-def test_list_instances_from_dict():
- test_list_instances(request_type=dict)
-
-
def test_list_instances_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1152,7 +1302,9 @@ def test_list_instances_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_instances_flattened_error():
@@ -1190,7 +1342,9 @@ async def test_list_instances_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1207,7 +1361,8 @@ async def test_list_instances_flattened_error_async():
)
-def test_update_instance(transport: str = "grpc", request_type=instance.Instance):
+@pytest.mark.parametrize("request_type", [instance.Instance, dict,])
+def test_update_instance(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1240,10 +1395,6 @@ def test_update_instance(transport: str = "grpc", request_type=instance.Instance
assert response.type_ == instance.Instance.Type.PRODUCTION
-def test_update_instance_from_dict():
- test_update_instance(request_type=dict)
-
-
def test_update_instance_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1355,10 +1506,10 @@ async def test_update_instance_field_headers_async():
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
-def test_partial_update_instance(
- transport: str = "grpc",
- request_type=bigtable_instance_admin.PartialUpdateInstanceRequest,
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.PartialUpdateInstanceRequest, dict,]
+)
+def test_partial_update_instance(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1384,10 +1535,6 @@ def test_partial_update_instance(
assert isinstance(response, future.Future)
-def test_partial_update_instance_from_dict():
- test_partial_update_instance(request_type=dict)
-
-
def test_partial_update_instance_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1527,8 +1674,12 @@ def test_partial_update_instance_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].instance == gba_instance.Instance(name="name_value")
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
+ arg = args[0].instance
+ mock_val = gba_instance.Instance(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
def test_partial_update_instance_flattened_error():
@@ -1573,8 +1724,12 @@ async def test_partial_update_instance_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].instance == gba_instance.Instance(name="name_value")
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
+ arg = args[0].instance
+ mock_val = gba_instance.Instance(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1593,9 +1748,10 @@ async def test_partial_update_instance_flattened_error_async():
)
-def test_delete_instance(
- transport: str = "grpc", request_type=bigtable_instance_admin.DeleteInstanceRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.DeleteInstanceRequest, dict,]
+)
+def test_delete_instance(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1619,10 +1775,6 @@ def test_delete_instance(
assert response is None
-def test_delete_instance_from_dict():
- test_delete_instance(request_type=dict)
-
-
def test_delete_instance_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1741,7 +1893,9 @@ def test_delete_instance_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_delete_instance_flattened_error():
@@ -1777,7 +1931,9 @@ async def test_delete_instance_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1794,9 +1950,10 @@ async def test_delete_instance_flattened_error_async():
)
-def test_create_cluster(
- transport: str = "grpc", request_type=bigtable_instance_admin.CreateClusterRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.CreateClusterRequest, dict,]
+)
+def test_create_cluster(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1820,10 +1977,6 @@ def test_create_cluster(
assert isinstance(response, future.Future)
-def test_create_cluster_from_dict():
- test_create_cluster(request_type=dict)
-
-
def test_create_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1950,9 +2103,15 @@ def test_create_cluster_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].cluster_id == "cluster_id_value"
- assert args[0].cluster == instance.Cluster(name="name_value")
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].cluster_id
+ mock_val = "cluster_id_value"
+ assert arg == mock_val
+ arg = args[0].cluster
+ mock_val = instance.Cluster(name="name_value")
+ assert arg == mock_val
def test_create_cluster_flattened_error():
@@ -1997,9 +2156,15 @@ async def test_create_cluster_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].cluster_id == "cluster_id_value"
- assert args[0].cluster == instance.Cluster(name="name_value")
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].cluster_id
+ mock_val = "cluster_id_value"
+ assert arg == mock_val
+ arg = args[0].cluster
+ mock_val = instance.Cluster(name="name_value")
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2019,9 +2184,10 @@ async def test_create_cluster_flattened_error_async():
)
-def test_get_cluster(
- transport: str = "grpc", request_type=bigtable_instance_admin.GetClusterRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.GetClusterRequest, dict,]
+)
+def test_get_cluster(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2039,6 +2205,11 @@ def test_get_cluster(
state=instance.Cluster.State.READY,
serve_nodes=1181,
default_storage_type=common.StorageType.SSD,
+ cluster_config=instance.Cluster.ClusterConfig(
+ cluster_autoscaling_config=instance.Cluster.ClusterAutoscalingConfig(
+ autoscaling_limits=instance.AutoscalingLimits(min_serve_nodes=1600)
+ )
+ ),
)
response = client.get_cluster(request)
@@ -2056,10 +2227,6 @@ def test_get_cluster(
assert response.default_storage_type == common.StorageType.SSD
-def test_get_cluster_from_dict():
- test_get_cluster(request_type=dict)
-
-
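# The expanded mock return value above exercises the autoscaling fields on the
# Cluster message. A hedged sketch of constructing such a config by hand (the
# resource name and limit values are placeholders):
from google.cloud.bigtable_admin_v2.types import instance as instance_type

autoscaled = instance_type.Cluster(
    name="projects/p/instances/i/clusters/c",
    cluster_config=instance_type.Cluster.ClusterConfig(
        cluster_autoscaling_config=instance_type.Cluster.ClusterAutoscalingConfig(
            autoscaling_limits=instance_type.AutoscalingLimits(
                min_serve_nodes=1, max_serve_nodes=10,
            ),
        ),
    ),
)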
def test_get_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2191,7 +2358,9 @@ def test_get_cluster_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_cluster_flattened_error():
@@ -2227,7 +2396,9 @@ async def test_get_cluster_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2244,9 +2415,10 @@ async def test_get_cluster_flattened_error_async():
)
-def test_list_clusters(
- transport: str = "grpc", request_type=bigtable_instance_admin.ListClustersRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.ListClustersRequest, dict,]
+)
+def test_list_clusters(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2276,10 +2448,6 @@ def test_list_clusters(
assert response.next_page_token == "next_page_token_value"
-def test_list_clusters_from_dict():
- test_list_clusters(request_type=dict)
-
-
def test_list_clusters_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2407,7 +2575,9 @@ def test_list_clusters_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_clusters_flattened_error():
@@ -2445,7 +2615,9 @@ async def test_list_clusters_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2462,7 +2634,8 @@ async def test_list_clusters_flattened_error_async():
)
-def test_update_cluster(transport: str = "grpc", request_type=instance.Cluster):
+@pytest.mark.parametrize("request_type", [instance.Cluster, dict,])
+def test_update_cluster(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2486,10 +2659,6 @@ def test_update_cluster(transport: str = "grpc", request_type=instance.Cluster):
assert isinstance(response, future.Future)
-def test_update_cluster_from_dict():
- test_update_cluster(request_type=dict)
-
-
def test_update_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2594,9 +2763,252 @@ async def test_update_cluster_field_headers_async():
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
-def test_delete_cluster(
- transport: str = "grpc", request_type=bigtable_instance_admin.DeleteClusterRequest
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.PartialUpdateClusterRequest, dict,]
+)
+def test_partial_update_cluster(request_type, transport: str = "grpc"):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.partial_update_cluster), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.partial_update_cluster(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
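# The block of new tests that follows covers the freshly added
# partial_update_cluster RPC. A hedged sketch of the flattened call shape a
# caller would use (IDs are placeholders; the flattened tests below confirm
# the cluster / update_mask surface):
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.types import instance as instance_type
from google.protobuf import field_mask_pb2

client = BigtableInstanceAdminClient()
operation = client.partial_update_cluster(
    cluster=instance_type.Cluster(
        name="projects/p/instances/i/clusters/c", serve_nodes=5,
    ),
    update_mask=field_mask_pb2.FieldMask(paths=["serve_nodes"]),
)
operation.result(timeout=60)  # wait for the long-running update to finish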
+def test_partial_update_cluster_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.partial_update_cluster), "__call__"
+ ) as call:
+ client.partial_update_cluster()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest()
+
+
+@pytest.mark.asyncio
+async def test_partial_update_cluster_async(
+ transport: str = "grpc_asyncio",
+ request_type=bigtable_instance_admin.PartialUpdateClusterRequest,
):
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.partial_update_cluster), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.partial_update_cluster(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_partial_update_cluster_async_from_dict():
+ await test_partial_update_cluster_async(request_type=dict)
+
+
+def test_partial_update_cluster_field_headers():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_instance_admin.PartialUpdateClusterRequest()
+
+ request.cluster.name = "cluster.name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.partial_update_cluster), "__call__"
+ ) as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.partial_update_cluster(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "cluster.name=cluster.name/value",) in kw[
+ "metadata"
+ ]
+
+
+@pytest.mark.asyncio
+async def test_partial_update_cluster_field_headers_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_instance_admin.PartialUpdateClusterRequest()
+
+ request.cluster.name = "cluster.name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.partial_update_cluster), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.partial_update_cluster(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "cluster.name=cluster.name/value",) in kw[
+ "metadata"
+ ]
+
+
+def test_partial_update_cluster_flattened():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.partial_update_cluster), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.partial_update_cluster(
+ cluster=instance.Cluster(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].cluster
+ mock_val = instance.Cluster(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+def test_partial_update_cluster_flattened_error():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.partial_update_cluster(
+ bigtable_instance_admin.PartialUpdateClusterRequest(),
+ cluster=instance.Cluster(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.asyncio
+async def test_partial_update_cluster_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.partial_update_cluster), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.partial_update_cluster(
+ cluster=instance.Cluster(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].cluster
+ mock_val = instance.Cluster(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_partial_update_cluster_flattened_error_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.partial_update_cluster(
+ bigtable_instance_admin.PartialUpdateClusterRequest(),
+ cluster=instance.Cluster(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.DeleteClusterRequest, dict,]
+)
+def test_delete_cluster(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2620,10 +3032,6 @@ def test_delete_cluster(
assert response is None
-def test_delete_cluster_from_dict():
- test_delete_cluster(request_type=dict)
-
-
def test_delete_cluster_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2742,7 +3150,9 @@ def test_delete_cluster_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_delete_cluster_flattened_error():
@@ -2778,7 +3188,9 @@ async def test_delete_cluster_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2795,10 +3207,10 @@ async def test_delete_cluster_flattened_error_async():
)
-def test_create_app_profile(
- transport: str = "grpc",
- request_type=bigtable_instance_admin.CreateAppProfileRequest,
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.CreateAppProfileRequest, dict,]
+)
+def test_create_app_profile(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2834,10 +3246,6 @@ def test_create_app_profile(
assert response.description == "description_value"
-def test_create_app_profile_from_dict():
- test_create_app_profile(request_type=dict)
-
-
def test_create_app_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2977,9 +3385,15 @@ def test_create_app_profile_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].app_profile_id == "app_profile_id_value"
- assert args[0].app_profile == instance.AppProfile(name="name_value")
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
+ arg = args[0].app_profile
+ mock_val = instance.AppProfile(name="name_value")
+ assert arg == mock_val
def test_create_app_profile_flattened_error():
@@ -3024,9 +3438,15 @@ async def test_create_app_profile_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].app_profile_id == "app_profile_id_value"
- assert args[0].app_profile == instance.AppProfile(name="name_value")
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
+ arg = args[0].app_profile
+ mock_val = instance.AppProfile(name="name_value")
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3046,9 +3466,10 @@ async def test_create_app_profile_flattened_error_async():
)
-def test_get_app_profile(
- transport: str = "grpc", request_type=bigtable_instance_admin.GetAppProfileRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.GetAppProfileRequest, dict,]
+)
+def test_get_app_profile(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3082,10 +3503,6 @@ def test_get_app_profile(
assert response.description == "description_value"
-def test_get_app_profile_from_dict():
- test_get_app_profile(request_type=dict)
-
-
def test_get_app_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3211,7 +3628,9 @@ def test_get_app_profile_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_app_profile_flattened_error():
@@ -3247,7 +3666,9 @@ async def test_get_app_profile_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3264,9 +3685,10 @@ async def test_get_app_profile_flattened_error_async():
)
-def test_list_app_profiles(
- transport: str = "grpc", request_type=bigtable_instance_admin.ListAppProfilesRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.ListAppProfilesRequest, dict,]
+)
+def test_list_app_profiles(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3297,10 +3719,6 @@ def test_list_app_profiles(
assert response.failed_locations == ["failed_locations_value"]
-def test_list_app_profiles_from_dict():
- test_list_app_profiles(request_type=dict)
-
-
def test_list_app_profiles_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3438,7 +3856,9 @@ def test_list_app_profiles_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_app_profiles_flattened_error():
@@ -3478,7 +3898,9 @@ async def test_list_app_profiles_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3495,9 +3917,9 @@ async def test_list_app_profiles_flattened_error_async():
)
-def test_list_app_profiles_pager():
+def test_list_app_profiles_pager(transport_name: str = "grpc"):
client = BigtableInstanceAdminClient(
- credentials=ga_credentials.AnonymousCredentials,
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3539,9 +3961,9 @@ def test_list_app_profiles_pager():
assert all(isinstance(i, instance.AppProfile) for i in results)
-def test_list_app_profiles_pages():
+def test_list_app_profiles_pages(transport_name: str = "grpc"):
client = BigtableInstanceAdminClient(
- credentials=ga_credentials.AnonymousCredentials,
+        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
@@ -3657,10 +4079,10 @@ async def test_list_app_profiles_async_pages():
assert page_.raw_page.next_page_token == token
-def test_update_app_profile(
- transport: str = "grpc",
- request_type=bigtable_instance_admin.UpdateAppProfileRequest,
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.UpdateAppProfileRequest, dict,]
+)
+def test_update_app_profile(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3686,10 +4108,6 @@ def test_update_app_profile(
assert isinstance(response, future.Future)
-def test_update_app_profile_from_dict():
- test_update_app_profile(request_type=dict)
-
-
def test_update_app_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3829,8 +4247,12 @@ def test_update_app_profile_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].app_profile == instance.AppProfile(name="name_value")
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
+ arg = args[0].app_profile
+ mock_val = instance.AppProfile(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
def test_update_app_profile_flattened_error():
@@ -3875,8 +4297,12 @@ async def test_update_app_profile_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].app_profile == instance.AppProfile(name="name_value")
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
+ arg = args[0].app_profile
+ mock_val = instance.AppProfile(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3895,10 +4321,10 @@ async def test_update_app_profile_flattened_error_async():
)
-def test_delete_app_profile(
- transport: str = "grpc",
- request_type=bigtable_instance_admin.DeleteAppProfileRequest,
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_instance_admin.DeleteAppProfileRequest, dict,]
+)
+def test_delete_app_profile(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3924,10 +4350,6 @@ def test_delete_app_profile(
assert response is None
-def test_delete_app_profile_from_dict():
- test_delete_app_profile(request_type=dict)
-
-
def test_delete_app_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4056,7 +4478,9 @@ def test_delete_app_profile_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_delete_app_profile_flattened_error():
@@ -4094,7 +4518,9 @@ async def test_delete_app_profile_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4111,9 +4537,8 @@ async def test_delete_app_profile_flattened_error_async():
)
-def test_get_iam_policy(
- transport: str = "grpc", request_type=iam_policy_pb2.GetIamPolicyRequest
-):
+@pytest.mark.parametrize("request_type", [iam_policy_pb2.GetIamPolicyRequest, dict,])
+def test_get_iam_policy(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4139,10 +4564,6 @@ def test_get_iam_policy(
assert response.etag == b"etag_blob"
-def test_get_iam_policy_from_dict():
- test_get_iam_policy(request_type=dict)
-
-
def test_get_iam_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4281,7 +4702,9 @@ def test_get_iam_policy_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
def test_get_iam_policy_flattened_error():
@@ -4317,7 +4740,9 @@ async def test_get_iam_policy_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4334,9 +4759,8 @@ async def test_get_iam_policy_flattened_error_async():
)
-def test_set_iam_policy(
- transport: str = "grpc", request_type=iam_policy_pb2.SetIamPolicyRequest
-):
+@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,])
+def test_set_iam_policy(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4362,10 +4786,6 @@ def test_set_iam_policy(
assert response.etag == b"etag_blob"
-def test_set_iam_policy_from_dict():
- test_set_iam_policy(request_type=dict)
-
-
def test_set_iam_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4504,7 +4924,9 @@ def test_set_iam_policy_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
def test_set_iam_policy_flattened_error():
@@ -4540,7 +4962,9 @@ async def test_set_iam_policy_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4557,9 +4981,10 @@ async def test_set_iam_policy_flattened_error_async():
)
-def test_test_iam_permissions(
- transport: str = "grpc", request_type=iam_policy_pb2.TestIamPermissionsRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [iam_policy_pb2.TestIamPermissionsRequest, dict,]
+)
+def test_test_iam_permissions(request_type, transport: str = "grpc"):
client = BigtableInstanceAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4588,10 +5013,6 @@ def test_test_iam_permissions(
assert response.permissions == ["permissions_value"]
-def test_test_iam_permissions_from_dict():
- test_test_iam_permissions(request_type=dict)
-
-
def test_test_iam_permissions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4748,8 +5169,12 @@ def test_test_iam_permissions_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
- assert args[0].permissions == ["permissions_value"]
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
+ arg = args[0].permissions
+ mock_val = ["permissions_value"]
+ assert arg == mock_val
def test_test_iam_permissions_flattened_error():
@@ -4793,8 +5218,12 @@ async def test_test_iam_permissions_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
- assert args[0].permissions == ["permissions_value"]
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
+ arg = args[0].permissions
+ mock_val = ["permissions_value"]
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4833,6 +5262,25 @@ def test_credentials_transport_error():
transport=transport,
)
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.BigtableInstanceAdminGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = BigtableInstanceAdminClient(
+ client_options=options, transport=transport,
+ )
+
+ # It is an error to provide an api_key and a credential.
+ options = mock.Mock()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = BigtableInstanceAdminClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
# It is an error to provide scopes and a transport instance.
transport = transports.BigtableInstanceAdminGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
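The assertions added above pin down client-option validation: an `api_key` may not be combined with an explicit transport instance or with explicit credentials. A simplified sketch of that mutual-exclusion rule, assuming a stand-in `SimpleClient` rather than the generated client implementation:

```python
# Simplified sketch of the mutual-exclusion rule the new tests assert;
# SimpleClient and Options are stand-ins, not the generated client code.
import pytest


class SimpleClient:
    def __init__(self, client_options=None, transport=None, credentials=None):
        api_key = getattr(client_options, "api_key", None)
        if api_key and transport is not None:
            raise ValueError("api_key and transport are mutually exclusive")
        if api_key and credentials is not None:
            raise ValueError("api_key and credentials are mutually exclusive")
        self.transport = transport
        self.credentials = credentials


class Options:
    api_key = "api_key"


with pytest.raises(ValueError):
    SimpleClient(client_options=Options(), transport=object())

with pytest.raises(ValueError):
    SimpleClient(client_options=Options(), credentials=object())
```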
@@ -4922,6 +5370,7 @@ def test_bigtable_instance_admin_base_transport():
"get_cluster",
"list_clusters",
"update_cluster",
+ "partial_update_cluster",
"delete_cluster",
"create_app_profile",
"get_app_profile",
@@ -4936,13 +5385,15 @@ def test_bigtable_instance_admin_base_transport():
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
-@requires_google_auth_gte_1_25_0
def test_bigtable_instance_admin_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
@@ -4971,34 +5422,6 @@ def test_bigtable_instance_admin_base_transport_with_credentials_file():
)
-@requires_google_auth_lt_1_25_0
-def test_bigtable_instance_admin_base_transport_with_credentials_file_old_google_auth():
- # Instantiate the base transport with a credentials file
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch(
- "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages"
- ) as Transport:
- Transport.return_value = None
- load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
- transport = transports.BigtableInstanceAdminTransport(
- credentials_file="credentials.json", quota_project_id="octopus",
- )
- load_creds.assert_called_once_with(
- "credentials.json",
- scopes=(
- "https://www.googleapis.com/auth/bigtable.admin",
- "https://www.googleapis.com/auth/bigtable.admin.cluster",
- "https://www.googleapis.com/auth/bigtable.admin.instance",
- "https://www.googleapis.com/auth/cloud-bigtable.admin",
- "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- ),
- quota_project_id="octopus",
- )
-
-
def test_bigtable_instance_admin_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
@@ -5010,7 +5433,6 @@ def test_bigtable_instance_admin_base_transport_with_adc():
adc.assert_called_once()
-@requires_google_auth_gte_1_25_0
def test_bigtable_instance_admin_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
@@ -5031,26 +5453,6 @@ def test_bigtable_instance_admin_auth_adc():
)
-@requires_google_auth_lt_1_25_0
-def test_bigtable_instance_admin_auth_adc_old_google_auth():
- # If no credentials are provided, we should use ADC credentials.
- with mock.patch.object(google.auth, "default", autospec=True) as adc:
- adc.return_value = (ga_credentials.AnonymousCredentials(), None)
- BigtableInstanceAdminClient()
- adc.assert_called_once_with(
- scopes=(
- "https://www.googleapis.com/auth/bigtable.admin",
- "https://www.googleapis.com/auth/bigtable.admin.cluster",
- "https://www.googleapis.com/auth/bigtable.admin.instance",
- "https://www.googleapis.com/auth/cloud-bigtable.admin",
- "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- ),
- quota_project_id=None,
- )
-
-
@pytest.mark.parametrize(
"transport_class",
[
@@ -5058,7 +5460,6 @@ def test_bigtable_instance_admin_auth_adc_old_google_auth():
transports.BigtableInstanceAdminGrpcAsyncIOTransport,
],
)
-@requires_google_auth_gte_1_25_0
def test_bigtable_instance_admin_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
@@ -5080,34 +5481,6 @@ def test_bigtable_instance_admin_transport_auth_adc(transport_class):
)
-@pytest.mark.parametrize(
- "transport_class",
- [
- transports.BigtableInstanceAdminGrpcTransport,
- transports.BigtableInstanceAdminGrpcAsyncIOTransport,
- ],
-)
-@requires_google_auth_lt_1_25_0
-def test_bigtable_instance_admin_transport_auth_adc_old_google_auth(transport_class):
- # If credentials and host are not provided, the transport class should use
- # ADC credentials.
- with mock.patch.object(google.auth, "default", autospec=True) as adc:
- adc.return_value = (ga_credentials.AnonymousCredentials(), None)
- transport_class(quota_project_id="octopus")
- adc.assert_called_once_with(
- scopes=(
- "https://www.googleapis.com/auth/bigtable.admin",
- "https://www.googleapis.com/auth/bigtable.admin.cluster",
- "https://www.googleapis.com/auth/bigtable.admin.instance",
- "https://www.googleapis.com/auth/cloud-bigtable.admin",
- "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- ),
- quota_project_id="octopus",
- )
-
-
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
@@ -5566,7 +5939,7 @@ def test_parse_common_location_path():
assert expected == actual
-def test_client_withDEFAULT_CLIENT_INFO():
+def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
@@ -5585,3 +5958,82 @@ def test_client_withDEFAULT_CLIENT_INFO():
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close():
+ transports = {
+ "grpc": "_grpc_channel",
+ }
+
+ for transport, close_name in transports.items():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, close_name)), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "grpc",
+ ]
+ for transport in transports:
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
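`test_transport_close` and `test_client_ctx` above verify the contract that the earlier base-transport hunk also touches: `close()` is declared on the abstract transport but unimplemented (hence the `NotImplementedError` expectation), concrete transports override it to release the gRPC channel, and clients act as context managers that close the transport on exit. A compact sketch of that contract, with placeholder class names rather than the generated transports:

```python
# Compact sketch of the close() contract; BaseTransport, GrpcLikeTransport,
# and ClientLike are placeholders, not the generated classes.
class BaseTransport:
    def close(self):
        raise NotImplementedError()


class GrpcLikeTransport(BaseTransport):
    def __init__(self):
        self._grpc_channel = object()  # real transports hold a gRPC channel

    def close(self):
        # Concrete transports release the underlying channel.
        self._grpc_channel = None


class ClientLike:
    def __init__(self, transport):
        self.transport = transport

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        # Exiting the client context closes the transport exactly once.
        self.transport.close()


client = ClientLike(GrpcLikeTransport())
with client:
    pass
assert client.transport._grpc_channel is None
```

The async variant tested above follows the same shape, with `async with` driving the close.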
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport),
+ (
+ BigtableInstanceAdminAsyncClient,
+ transports.BigtableInstanceAdminGrpcAsyncIOTransport,
+ ),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
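`test_api_key_credentials` above checks that when `ClientOptions.api_key` is set, the client resolves credentials through `google.auth._default.get_api_key_credentials` and forwards them to the transport; the helper is patched with `create=True` precisely because it may not exist in older google-auth releases. A rough sketch of that wiring under the same mocking approach, where `build_client` is a hypothetical stand-in for the generated constructor:

```python
# Rough sketch of the api_key -> credentials wiring the test asserts;
# build_client is hypothetical, not the generated client __init__.
import mock
import google.auth


def build_client(client_options, transport_class):
    credentials = None
    if getattr(client_options, "api_key", None):
        # The generated clients resolve API keys through this private helper.
        credentials = google.auth._default.get_api_key_credentials(
            client_options.api_key
        )
    return transport_class(credentials=credentials)


with mock.patch.object(
    google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
    get_api_key_credentials.return_value = mock.Mock()
    options = mock.Mock(api_key="api_key")
    transport_cls = mock.Mock()
    build_client(options, transport_cls)
    get_api_key_credentials.assert_called_once_with("api_key")
    transport_cls.assert_called_once_with(
        credentials=get_api_key_credentials.return_value
    )
```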
diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
index 6bfe7d012..49d2c9ddf 100644
--- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
+++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
@@ -15,7 +15,6 @@
#
import os
import mock
-import packaging.version
import grpc
from grpc.experimental import aio
@@ -30,8 +29,10 @@
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
+from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
+from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
@@ -42,9 +43,6 @@
)
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports
-from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import (
- _GOOGLE_AUTH_VERSION,
-)
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
from google.cloud.bigtable_admin_v2.types import table
from google.cloud.bigtable_admin_v2.types import table as gba_table
@@ -62,20 +60,6 @@
import google.auth
-# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
-# through google-api-core:
-# - Delete the auth "less than" test cases
-# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
- packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
- reason="This test requires google-auth < 1.25.0",
-)
-requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
- packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
- reason="This test requires google-auth >= 1.25.0",
-)
-
-
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
@@ -233,7 +217,7 @@ def test_bigtable_table_admin_client_client_options(
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -250,7 +234,7 @@ def test_bigtable_table_admin_client_client_options(
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -267,7 +251,7 @@ def test_bigtable_table_admin_client_client_options(
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -283,20 +267,20 @@ def test_bigtable_table_admin_client_client_options(
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -365,7 +349,7 @@ def test_bigtable_table_admin_client_mtls_env_auto(
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
@@ -407,7 +391,7 @@ def test_bigtable_table_admin_client_mtls_env_auto(
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -429,7 +413,7 @@ def test_bigtable_table_admin_client_mtls_env_auto(
return_value=False,
):
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -442,6 +426,87 @@ def test_bigtable_table_admin_client_mtls_env_auto(
)
+@pytest.mark.parametrize(
+ "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient]
+)
+@mock.patch.object(
+ BigtableTableAdminClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(BigtableTableAdminClient),
+)
+@mock.patch.object(
+ BigtableTableAdminAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(BigtableTableAdminAsyncClient),
+)
+def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_class):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+
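The new `get_mtls_endpoint_and_cert_source` test enumerates a decision table driven by the `GOOGLE_API_USE_CLIENT_CERTIFICATE` and `GOOGLE_API_USE_MTLS_ENDPOINT` environment variables. A condensed approximation of that resolution logic, with simplified inputs; `resolve_mtls` is illustrative, not the generated classmethod itself:

```python
# Condensed approximation of the endpoint/cert resolution the test covers.
import os

DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com"
DEFAULT_MTLS_ENDPOINT = "bigtableadmin.mtls.googleapis.com"


def resolve_mtls(api_endpoint=None, client_cert_source=None, default_cert_source=None):
    use_cert = os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
    use_mtls = os.environ.get("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")

    cert_source = None
    if use_cert == "true":
        # An explicit cert source wins; otherwise use the machine default, if any.
        cert_source = client_cert_source or default_cert_source

    if api_endpoint is not None:
        # A caller-supplied endpoint is always honored as-is.
        return api_endpoint, cert_source
    if use_mtls == "always" or (use_mtls == "auto" and cert_source):
        return DEFAULT_MTLS_ENDPOINT, cert_source
    return DEFAULT_ENDPOINT, None
```

Each branch of the test above corresponds to one row of this table: explicit endpoint wins, "never" forces the plain endpoint, "always" forces the mTLS endpoint, and "auto" switches on whether a cert source is available.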
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
@@ -460,7 +525,7 @@ def test_bigtable_table_admin_client_client_options_scopes(
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -474,24 +539,31 @@ def test_bigtable_table_admin_client_client_options_scopes(
@pytest.mark.parametrize(
- "client_class,transport_class,transport_name",
+ "client_class,transport_class,transport_name,grpc_helpers",
[
- (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"),
+ (
+ BigtableTableAdminClient,
+ transports.BigtableTableAdminGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
(
BigtableTableAdminAsyncClient,
transports.BigtableTableAdminGrpcAsyncIOTransport,
"grpc_asyncio",
+ grpc_helpers_async,
),
],
)
def test_bigtable_table_admin_client_client_options_credentials_file(
- client_class, transport_class, transport_name
+ client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
+
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
@@ -524,9 +596,83 @@ def test_bigtable_table_admin_client_client_options_from_dict():
)
-def test_create_table(
- transport: str = "grpc", request_type=bigtable_table_admin.CreateTableRequest
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ BigtableTableAdminClient,
+ transports.BigtableTableAdminGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ BigtableTableAdminAsyncClient,
+ transports.BigtableTableAdminGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_bigtable_table_admin_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "bigtableadmin.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=(
+ "https://www.googleapis.com/auth/bigtable.admin",
+ "https://www.googleapis.com/auth/bigtable.admin.table",
+ "https://www.googleapis.com/auth/cloud-bigtable.admin",
+ "https://www.googleapis.com/auth/cloud-bigtable.admin.table",
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ scopes=None,
+ default_host="bigtableadmin.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
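`test_bigtable_table_admin_client_create_channel_credentials_file` above patches `load_credentials_from_file` and `create_channel` together to prove that the file-sourced credentials, not ADC, reach the channel. A trimmed sketch of the same mocking recipe against a hypothetical channel factory (`open_channel` stands in for the grpc_helpers-based transport code):

```python
# Trimmed sketch of the credentials-file mocking recipe; open_channel is a
# hypothetical factory, not grpc_helpers.create_channel itself.
import mock
import google.auth
from google.auth import credentials as ga_credentials


def open_channel(host, credentials_file=None):
    if credentials_file:
        credentials, _ = google.auth.load_credentials_from_file(credentials_file)
    else:
        credentials, _ = google.auth.default()
    return ("channel", host, credentials)


with mock.patch.object(
    google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(google.auth, "default", autospec=True) as adc:
    file_creds = ga_credentials.AnonymousCredentials()
    load_creds.return_value = (file_creds, None)
    adc.return_value = (ga_credentials.AnonymousCredentials(), None)
    _, _, used = open_channel("bigtableadmin.googleapis.com:443", "credentials.json")
    # The file-derived credentials must win over ADC.
    assert used is file_creds
    adc.assert_not_called()
```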
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.CreateTableRequest, dict,]
+)
+def test_create_table(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -554,10 +700,6 @@ def test_create_table(
assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS
-def test_create_table_from_dict():
- test_create_table(request_type=dict)
-
-
def test_create_table_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -687,9 +829,15 @@ def test_create_table_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].table_id == "table_id_value"
- assert args[0].table == gba_table.Table(name="name_value")
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].table_id
+ mock_val = "table_id_value"
+ assert arg == mock_val
+ arg = args[0].table
+ mock_val = gba_table.Table(name="name_value")
+ assert arg == mock_val
def test_create_table_flattened_error():
@@ -732,9 +880,15 @@ async def test_create_table_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].table_id == "table_id_value"
- assert args[0].table == gba_table.Table(name="name_value")
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].table_id
+ mock_val = "table_id_value"
+ assert arg == mock_val
+ arg = args[0].table
+ mock_val = gba_table.Table(name="name_value")
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -754,10 +908,10 @@ async def test_create_table_flattened_error_async():
)
-def test_create_table_from_snapshot(
- transport: str = "grpc",
- request_type=bigtable_table_admin.CreateTableFromSnapshotRequest,
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.CreateTableFromSnapshotRequest, dict,]
+)
+def test_create_table_from_snapshot(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -783,10 +937,6 @@ def test_create_table_from_snapshot(
assert isinstance(response, future.Future)
-def test_create_table_from_snapshot_from_dict():
- test_create_table_from_snapshot(request_type=dict)
-
-
def test_create_table_from_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -923,9 +1073,15 @@ def test_create_table_from_snapshot_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].table_id == "table_id_value"
- assert args[0].source_snapshot == "source_snapshot_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].table_id
+ mock_val = "table_id_value"
+ assert arg == mock_val
+ arg = args[0].source_snapshot
+ mock_val = "source_snapshot_value"
+ assert arg == mock_val
def test_create_table_from_snapshot_flattened_error():
@@ -972,9 +1128,15 @@ async def test_create_table_from_snapshot_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].table_id == "table_id_value"
- assert args[0].source_snapshot == "source_snapshot_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].table_id
+ mock_val = "table_id_value"
+ assert arg == mock_val
+ arg = args[0].source_snapshot
+ mock_val = "source_snapshot_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -994,9 +1156,10 @@ async def test_create_table_from_snapshot_flattened_error_async():
)
-def test_list_tables(
- transport: str = "grpc", request_type=bigtable_table_admin.ListTablesRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.ListTablesRequest, dict,]
+)
+def test_list_tables(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1023,10 +1186,6 @@ def test_list_tables(
assert response.next_page_token == "next_page_token_value"
-def test_list_tables_from_dict():
- test_list_tables(request_type=dict)
-
-
def test_list_tables_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1151,7 +1310,9 @@ def test_list_tables_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_tables_flattened_error():
@@ -1189,7 +1350,9 @@ async def test_list_tables_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1206,8 +1369,10 @@ async def test_list_tables_flattened_error_async():
)
-def test_list_tables_pager():
- client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_tables_pager(transport_name: str = "grpc"):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
@@ -1240,8 +1405,10 @@ def test_list_tables_pager():
assert all(isinstance(i, table.Table) for i in results)
-def test_list_tables_pages():
- client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_tables_pages(transport_name: str = "grpc"):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
@@ -1332,9 +1499,8 @@ async def test_list_tables_async_pages():
assert page_.raw_page.next_page_token == token
-def test_get_table(
- transport: str = "grpc", request_type=bigtable_table_admin.GetTableRequest
-):
+@pytest.mark.parametrize("request_type", [bigtable_table_admin.GetTableRequest, dict,])
+def test_get_table(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1362,10 +1528,6 @@ def test_get_table(
assert response.granularity == table.Table.TimestampGranularity.MILLIS
-def test_get_table_from_dict():
- test_get_table(request_type=dict)
-
-
def test_get_table_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1489,7 +1651,9 @@ def test_get_table_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_table_flattened_error():
@@ -1525,7 +1689,9 @@ async def test_get_table_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1542,9 +1708,10 @@ async def test_get_table_flattened_error_async():
)
-def test_delete_table(
- transport: str = "grpc", request_type=bigtable_table_admin.DeleteTableRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.DeleteTableRequest, dict,]
+)
+def test_delete_table(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1568,10 +1735,6 @@ def test_delete_table(
assert response is None
-def test_delete_table_from_dict():
- test_delete_table(request_type=dict)
-
-
def test_delete_table_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1690,7 +1853,9 @@ def test_delete_table_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_delete_table_flattened_error():
@@ -1726,7 +1891,9 @@ async def test_delete_table_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1743,10 +1910,10 @@ async def test_delete_table_flattened_error_async():
)
-def test_modify_column_families(
- transport: str = "grpc",
- request_type=bigtable_table_admin.ModifyColumnFamiliesRequest,
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.ModifyColumnFamiliesRequest, dict,]
+)
+def test_modify_column_families(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1776,10 +1943,6 @@ def test_modify_column_families(
assert response.granularity == table.Table.TimestampGranularity.MILLIS
-def test_modify_column_families_from_dict():
- test_modify_column_families(request_type=dict)
-
-
def test_modify_column_families_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1921,10 +2084,14 @@ def test_modify_column_families_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].modifications == [
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].modifications
+ mock_val = [
bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value")
]
+ assert arg == mock_val
def test_modify_column_families_flattened_error():
@@ -1975,10 +2142,14 @@ async def test_modify_column_families_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].modifications == [
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].modifications
+ mock_val = [
bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value")
]
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2001,9 +2172,10 @@ async def test_modify_column_families_flattened_error_async():
)
-def test_drop_row_range(
- transport: str = "grpc", request_type=bigtable_table_admin.DropRowRangeRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.DropRowRangeRequest, dict,]
+)
+def test_drop_row_range(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2027,10 +2199,6 @@ def test_drop_row_range(
assert response is None
-def test_drop_row_range_from_dict():
- test_drop_row_range(request_type=dict)
-
-
def test_drop_row_range_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2132,10 +2300,10 @@ async def test_drop_row_range_field_headers_async():
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
-def test_generate_consistency_token(
- transport: str = "grpc",
- request_type=bigtable_table_admin.GenerateConsistencyTokenRequest,
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.GenerateConsistencyTokenRequest, dict,]
+)
+def test_generate_consistency_token(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2164,10 +2332,6 @@ def test_generate_consistency_token(
assert response.consistency_token == "consistency_token_value"
-def test_generate_consistency_token_from_dict():
- test_generate_consistency_token(request_type=dict)
-
-
def test_generate_consistency_token_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2303,7 +2467,9 @@ def test_generate_consistency_token_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_generate_consistency_token_flattened_error():
@@ -2343,7 +2509,9 @@ async def test_generate_consistency_token_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2360,9 +2528,10 @@ async def test_generate_consistency_token_flattened_error_async():
)
-def test_check_consistency(
- transport: str = "grpc", request_type=bigtable_table_admin.CheckConsistencyRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.CheckConsistencyRequest, dict,]
+)
+def test_check_consistency(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2391,10 +2560,6 @@ def test_check_consistency(
assert response.consistent is True
-def test_check_consistency_from_dict():
- test_check_consistency(request_type=dict)
-
-
def test_check_consistency_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2530,8 +2695,12 @@ def test_check_consistency_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].consistency_token == "consistency_token_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].consistency_token
+ mock_val = "consistency_token_value"
+ assert arg == mock_val
def test_check_consistency_flattened_error():
@@ -2575,8 +2744,12 @@ async def test_check_consistency_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].consistency_token == "consistency_token_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].consistency_token
+ mock_val = "consistency_token_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2595,9 +2768,10 @@ async def test_check_consistency_flattened_error_async():
)
-def test_snapshot_table(
- transport: str = "grpc", request_type=bigtable_table_admin.SnapshotTableRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.SnapshotTableRequest, dict,]
+)
+def test_snapshot_table(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2621,10 +2795,6 @@ def test_snapshot_table(
assert isinstance(response, future.Future)
-def test_snapshot_table_from_dict():
- test_snapshot_table(request_type=dict)
-
-
def test_snapshot_table_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2752,10 +2922,18 @@ def test_snapshot_table_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].cluster == "cluster_value"
- assert args[0].snapshot_id == "snapshot_id_value"
- assert args[0].description == "description_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].cluster
+ mock_val = "cluster_value"
+ assert arg == mock_val
+ arg = args[0].snapshot_id
+ mock_val = "snapshot_id_value"
+ assert arg == mock_val
+ arg = args[0].description
+ mock_val = "description_value"
+ assert arg == mock_val
def test_snapshot_table_flattened_error():
@@ -2802,10 +2980,18 @@ async def test_snapshot_table_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].cluster == "cluster_value"
- assert args[0].snapshot_id == "snapshot_id_value"
- assert args[0].description == "description_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+ arg = args[0].cluster
+ mock_val = "cluster_value"
+ assert arg == mock_val
+ arg = args[0].snapshot_id
+ mock_val = "snapshot_id_value"
+ assert arg == mock_val
+ arg = args[0].description
+ mock_val = "description_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -2826,9 +3012,10 @@ async def test_snapshot_table_flattened_error_async():
)
-def test_get_snapshot(
- transport: str = "grpc", request_type=bigtable_table_admin.GetSnapshotRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.GetSnapshotRequest, dict,]
+)
+def test_get_snapshot(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -2861,10 +3048,6 @@ def test_get_snapshot(
assert response.description == "description_value"
-def test_get_snapshot_from_dict():
- test_get_snapshot(request_type=dict)
-
-
def test_get_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -2994,7 +3177,9 @@ def test_get_snapshot_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_snapshot_flattened_error():
@@ -3030,7 +3215,9 @@ async def test_get_snapshot_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3047,9 +3234,10 @@ async def test_get_snapshot_flattened_error_async():
)
-def test_list_snapshots(
- transport: str = "grpc", request_type=bigtable_table_admin.ListSnapshotsRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.ListSnapshotsRequest, dict,]
+)
+def test_list_snapshots(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3076,10 +3264,6 @@ def test_list_snapshots(
assert response.next_page_token == "next_page_token_value"
-def test_list_snapshots_from_dict():
- test_list_snapshots(request_type=dict)
-
-
def test_list_snapshots_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3205,7 +3389,9 @@ def test_list_snapshots_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_snapshots_flattened_error():
@@ -3243,7 +3429,9 @@ async def test_list_snapshots_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3260,8 +3448,10 @@ async def test_list_snapshots_flattened_error_async():
)
-def test_list_snapshots_pager():
- client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_snapshots_pager(transport_name: str = "grpc"):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
@@ -3296,8 +3486,10 @@ def test_list_snapshots_pager():
assert all(isinstance(i, table.Snapshot) for i in results)
-def test_list_snapshots_pages():
- client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_snapshots_pages(transport_name: str = "grpc"):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
@@ -3394,9 +3586,10 @@ async def test_list_snapshots_async_pages():
assert page_.raw_page.next_page_token == token
-def test_delete_snapshot(
- transport: str = "grpc", request_type=bigtable_table_admin.DeleteSnapshotRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.DeleteSnapshotRequest, dict,]
+)
+def test_delete_snapshot(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3420,10 +3613,6 @@ def test_delete_snapshot(
assert response is None
-def test_delete_snapshot_from_dict():
- test_delete_snapshot(request_type=dict)
-
-
def test_delete_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3542,7 +3731,9 @@ def test_delete_snapshot_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_delete_snapshot_flattened_error():
@@ -3578,7 +3769,9 @@ async def test_delete_snapshot_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3595,9 +3788,10 @@ async def test_delete_snapshot_flattened_error_async():
)
-def test_create_backup(
- transport: str = "grpc", request_type=bigtable_table_admin.CreateBackupRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.CreateBackupRequest, dict,]
+)
+def test_create_backup(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3621,10 +3815,6 @@ def test_create_backup(
assert isinstance(response, future.Future)
-def test_create_backup_from_dict():
- test_create_backup(request_type=dict)
-
-
def test_create_backup_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3751,9 +3941,15 @@ def test_create_backup_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].backup_id == "backup_id_value"
- assert args[0].backup == table.Backup(name="name_value")
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].backup_id
+ mock_val = "backup_id_value"
+ assert arg == mock_val
+ arg = args[0].backup
+ mock_val = table.Backup(name="name_value")
+ assert arg == mock_val
def test_create_backup_flattened_error():
@@ -3798,9 +3994,15 @@ async def test_create_backup_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].backup_id == "backup_id_value"
- assert args[0].backup == table.Backup(name="name_value")
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].backup_id
+ mock_val = "backup_id_value"
+ assert arg == mock_val
+ arg = args[0].backup
+ mock_val = table.Backup(name="name_value")
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -3820,9 +4022,8 @@ async def test_create_backup_flattened_error_async():
)
-def test_get_backup(
- transport: str = "grpc", request_type=bigtable_table_admin.GetBackupRequest
-):
+@pytest.mark.parametrize("request_type", [bigtable_table_admin.GetBackupRequest, dict,])
+def test_get_backup(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -3855,10 +4056,6 @@ def test_get_backup(
assert response.state == table.Backup.State.CREATING
-def test_get_backup_from_dict():
- test_get_backup(request_type=dict)
-
-
def test_get_backup_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -3987,7 +4184,9 @@ def test_get_backup_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_get_backup_flattened_error():
@@ -4023,7 +4222,9 @@ async def test_get_backup_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4040,9 +4241,10 @@ async def test_get_backup_flattened_error_async():
)
-def test_update_backup(
- transport: str = "grpc", request_type=bigtable_table_admin.UpdateBackupRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.UpdateBackupRequest, dict,]
+)
+def test_update_backup(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4075,10 +4277,6 @@ def test_update_backup(
assert response.state == table.Backup.State.CREATING
-def test_update_backup_from_dict():
- test_update_backup(request_type=dict)
-
-
def test_update_backup_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4211,8 +4409,12 @@ def test_update_backup_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].backup == table.Backup(name="name_value")
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
+ arg = args[0].backup
+ mock_val = table.Backup(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
def test_update_backup_flattened_error():
@@ -4253,8 +4455,12 @@ async def test_update_backup_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].backup == table.Backup(name="name_value")
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
+ arg = args[0].backup
+ mock_val = table.Backup(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4273,9 +4479,10 @@ async def test_update_backup_flattened_error_async():
)
-def test_delete_backup(
- transport: str = "grpc", request_type=bigtable_table_admin.DeleteBackupRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.DeleteBackupRequest, dict,]
+)
+def test_delete_backup(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4299,10 +4506,6 @@ def test_delete_backup(
assert response is None
-def test_delete_backup_from_dict():
- test_delete_backup(request_type=dict)
-
-
def test_delete_backup_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4421,7 +4624,9 @@ def test_delete_backup_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
def test_delete_backup_flattened_error():
@@ -4457,7 +4662,9 @@ async def test_delete_backup_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4474,9 +4681,10 @@ async def test_delete_backup_flattened_error_async():
)
-def test_list_backups(
- transport: str = "grpc", request_type=bigtable_table_admin.ListBackupsRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.ListBackupsRequest, dict,]
+)
+def test_list_backups(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4503,10 +4711,6 @@ def test_list_backups(
assert response.next_page_token == "next_page_token_value"
-def test_list_backups_from_dict():
- test_list_backups(request_type=dict)
-
-
def test_list_backups_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4632,7 +4836,9 @@ def test_list_backups_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
def test_list_backups_flattened_error():
@@ -4670,7 +4876,9 @@ async def test_list_backups_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -4687,8 +4895,10 @@ async def test_list_backups_flattened_error_async():
)
-def test_list_backups_pager():
- client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_backups_pager(transport_name: str = "grpc"):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
@@ -4723,8 +4933,10 @@ def test_list_backups_pager():
assert all(isinstance(i, table.Backup) for i in results)
-def test_list_backups_pages():
- client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,)
+def test_list_backups_pages(transport_name: str = "grpc"):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
+ )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
@@ -4821,9 +5033,10 @@ async def test_list_backups_async_pages():
assert page_.raw_page.next_page_token == token
-def test_restore_table(
- transport: str = "grpc", request_type=bigtable_table_admin.RestoreTableRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [bigtable_table_admin.RestoreTableRequest, dict,]
+)
+def test_restore_table(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4847,10 +5060,6 @@ def test_restore_table(
assert isinstance(response, future.Future)
-def test_restore_table_from_dict():
- test_restore_table(request_type=dict)
-
-
def test_restore_table_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -4956,9 +5165,8 @@ async def test_restore_table_field_headers_async():
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
-def test_get_iam_policy(
- transport: str = "grpc", request_type=iam_policy_pb2.GetIamPolicyRequest
-):
+@pytest.mark.parametrize("request_type", [iam_policy_pb2.GetIamPolicyRequest, dict,])
+def test_get_iam_policy(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -4984,10 +5192,6 @@ def test_get_iam_policy(
assert response.etag == b"etag_blob"
-def test_get_iam_policy_from_dict():
- test_get_iam_policy(request_type=dict)
-
-
def test_get_iam_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -5126,7 +5330,9 @@ def test_get_iam_policy_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
def test_get_iam_policy_flattened_error():
@@ -5162,7 +5368,9 @@ async def test_get_iam_policy_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -5179,9 +5387,8 @@ async def test_get_iam_policy_flattened_error_async():
)
-def test_set_iam_policy(
- transport: str = "grpc", request_type=iam_policy_pb2.SetIamPolicyRequest
-):
+@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,])
+def test_set_iam_policy(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -5207,10 +5414,6 @@ def test_set_iam_policy(
assert response.etag == b"etag_blob"
-def test_set_iam_policy_from_dict():
- test_set_iam_policy(request_type=dict)
-
-
def test_set_iam_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -5349,7 +5552,9 @@ def test_set_iam_policy_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
def test_set_iam_policy_flattened_error():
@@ -5385,7 +5590,9 @@ async def test_set_iam_policy_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -5402,9 +5609,10 @@ async def test_set_iam_policy_flattened_error_async():
)
-def test_test_iam_permissions(
- transport: str = "grpc", request_type=iam_policy_pb2.TestIamPermissionsRequest
-):
+@pytest.mark.parametrize(
+ "request_type", [iam_policy_pb2.TestIamPermissionsRequest, dict,]
+)
+def test_test_iam_permissions(request_type, transport: str = "grpc"):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -5433,10 +5641,6 @@ def test_test_iam_permissions(
assert response.permissions == ["permissions_value"]
-def test_test_iam_permissions_from_dict():
- test_test_iam_permissions(request_type=dict)
-
-
def test_test_iam_permissions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -5593,8 +5797,12 @@ def test_test_iam_permissions_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
- assert args[0].permissions == ["permissions_value"]
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
+ arg = args[0].permissions
+ mock_val = ["permissions_value"]
+ assert arg == mock_val
def test_test_iam_permissions_flattened_error():
@@ -5638,8 +5846,12 @@ async def test_test_iam_permissions_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].resource == "resource_value"
- assert args[0].permissions == ["permissions_value"]
+ arg = args[0].resource
+ mock_val = "resource_value"
+ assert arg == mock_val
+ arg = args[0].permissions
+ mock_val = ["permissions_value"]
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -5678,6 +5890,23 @@ def test_credentials_transport_error():
transport=transport,
)
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.BigtableTableAdminGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = BigtableTableAdminClient(client_options=options, transport=transport,)
+
+ # It is an error to provide an api_key and a credential.
+ options = mock.Mock()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = BigtableTableAdminClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
# It is an error to provide scopes and a transport instance.
transport = transports.BigtableTableAdminGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
@@ -5784,13 +6013,15 @@ def test_bigtable_table_admin_base_transport():
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
-@requires_google_auth_gte_1_25_0
def test_bigtable_table_admin_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
@@ -5818,33 +6049,6 @@ def test_bigtable_table_admin_base_transport_with_credentials_file():
)
-@requires_google_auth_lt_1_25_0
-def test_bigtable_table_admin_base_transport_with_credentials_file_old_google_auth():
- # Instantiate the base transport with a credentials file
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch(
- "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages"
- ) as Transport:
- Transport.return_value = None
- load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
- transport = transports.BigtableTableAdminTransport(
- credentials_file="credentials.json", quota_project_id="octopus",
- )
- load_creds.assert_called_once_with(
- "credentials.json",
- scopes=(
- "https://www.googleapis.com/auth/bigtable.admin",
- "https://www.googleapis.com/auth/bigtable.admin.table",
- "https://www.googleapis.com/auth/cloud-bigtable.admin",
- "https://www.googleapis.com/auth/cloud-bigtable.admin.table",
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- ),
- quota_project_id="octopus",
- )
-
-
def test_bigtable_table_admin_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
@@ -5856,7 +6060,6 @@ def test_bigtable_table_admin_base_transport_with_adc():
adc.assert_called_once()
-@requires_google_auth_gte_1_25_0
def test_bigtable_table_admin_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
@@ -5876,25 +6079,6 @@ def test_bigtable_table_admin_auth_adc():
)
-@requires_google_auth_lt_1_25_0
-def test_bigtable_table_admin_auth_adc_old_google_auth():
- # If no credentials are provided, we should use ADC credentials.
- with mock.patch.object(google.auth, "default", autospec=True) as adc:
- adc.return_value = (ga_credentials.AnonymousCredentials(), None)
- BigtableTableAdminClient()
- adc.assert_called_once_with(
- scopes=(
- "https://www.googleapis.com/auth/bigtable.admin",
- "https://www.googleapis.com/auth/bigtable.admin.table",
- "https://www.googleapis.com/auth/cloud-bigtable.admin",
- "https://www.googleapis.com/auth/cloud-bigtable.admin.table",
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- ),
- quota_project_id=None,
- )
-
-
@pytest.mark.parametrize(
"transport_class",
[
@@ -5902,7 +6086,6 @@ def test_bigtable_table_admin_auth_adc_old_google_auth():
transports.BigtableTableAdminGrpcAsyncIOTransport,
],
)
-@requires_google_auth_gte_1_25_0
def test_bigtable_table_admin_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
@@ -5923,33 +6106,6 @@ def test_bigtable_table_admin_transport_auth_adc(transport_class):
)
-@pytest.mark.parametrize(
- "transport_class",
- [
- transports.BigtableTableAdminGrpcTransport,
- transports.BigtableTableAdminGrpcAsyncIOTransport,
- ],
-)
-@requires_google_auth_lt_1_25_0
-def test_bigtable_table_admin_transport_auth_adc_old_google_auth(transport_class):
- # If credentials and host are not provided, the transport class should use
- # ADC credentials.
- with mock.patch.object(google.auth, "default", autospec=True) as adc:
- adc.return_value = (ga_credentials.AnonymousCredentials(), None)
- transport_class(quota_project_id="octopus")
- adc.assert_called_once_with(
- scopes=(
- "https://www.googleapis.com/auth/bigtable.admin",
- "https://www.googleapis.com/auth/bigtable.admin.table",
- "https://www.googleapis.com/auth/cloud-bigtable.admin",
- "https://www.googleapis.com/auth/cloud-bigtable.admin.table",
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- ),
- quota_project_id="octopus",
- )
-
-
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
@@ -6463,7 +6619,7 @@ def test_parse_common_location_path():
assert expected == actual
-def test_client_withDEFAULT_CLIENT_INFO():
+def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
@@ -6482,3 +6638,82 @@ def test_client_withDEFAULT_CLIENT_INFO():
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+ client = BigtableTableAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close():
+ transports = {
+ "grpc": "_grpc_channel",
+ }
+
+ for transport, close_name in transports.items():
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, close_name)), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "grpc",
+ ]
+ for transport in transports:
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport),
+ (
+ BigtableTableAdminAsyncClient,
+ transports.BigtableTableAdminGrpcAsyncIOTransport,
+ ),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
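
Taken together, the api_key additions to this file pin down the intended call pattern: the key travels in `ClientOptions` and is mutually exclusive with an explicit transport instance or explicit credentials. A hedged usage sketch (the key string is a placeholder, and it assumes a google-auth release that supports API key credentials; note the tests set `api_key` as an attribute, not a constructor argument):

    from google.api_core import client_options as client_options_lib
    from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
        BigtableTableAdminClient,
    )

    options = client_options_lib.ClientOptions()
    options.api_key = "YOUR_API_KEY"  # placeholder

    # The key is exchanged for credentials internally, so none are passed here.
    client = BigtableTableAdminClient(client_options=options)

    # Passing credentials (or a transport) alongside the key raises ValueError,
    # per test_credentials_transport_error above.
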
diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py
index 3735f1074..19868b14e 100644
--- a/tests/unit/gapic/bigtable_v2/test_bigtable.py
+++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py
@@ -15,7 +15,6 @@
#
import os
import mock
-import packaging.version
import grpc
from grpc.experimental import aio
@@ -29,34 +28,18 @@
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
+from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient
from google.cloud.bigtable_v2.services.bigtable import BigtableClient
from google.cloud.bigtable_v2.services.bigtable import transports
-from google.cloud.bigtable_v2.services.bigtable.transports.base import (
- _GOOGLE_AUTH_VERSION,
-)
from google.cloud.bigtable_v2.types import bigtable
from google.cloud.bigtable_v2.types import data
from google.oauth2 import service_account
import google.auth
-# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
-# through google-api-core:
-# - Delete the auth "less than" test cases
-# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
- packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
- reason="This test requires google-auth < 1.25.0",
-)
-requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
- packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
- reason="This test requires google-auth >= 1.25.0",
-)
-
-
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
@@ -196,7 +179,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -213,7 +196,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -230,7 +213,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -246,20 +229,20 @@ def test_bigtable_client_client_options(client_class, transport_class, transport
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
- client = client_class()
+ client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -316,7 +299,7 @@ def test_bigtable_client_mtls_env_auto(
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
@@ -358,7 +341,7 @@ def test_bigtable_client_mtls_env_auto(
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -380,7 +363,7 @@ def test_bigtable_client_mtls_env_auto(
return_value=False,
):
patched.return_value = None
- client = client_class()
+ client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -393,6 +376,83 @@ def test_bigtable_client_mtls_env_auto(
)
+@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient])
+@mock.patch.object(
+ BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient)
+)
+@mock.patch.object(
+ BigtableAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(BigtableAsyncClient),
+)
+def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class):
+ mock_client_cert_source = mock.Mock()
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source == mock_client_cert_source
+
+ # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+ mock_client_cert_source = mock.Mock()
+ mock_api_endpoint = "foo"
+ options = client_options.ClientOptions(
+ client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
+ )
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
+ options
+ )
+ assert api_endpoint == mock_api_endpoint
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_ENDPOINT
+ assert cert_source is None
+
+ # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=mock_client_cert_source,
+ ):
+ (
+ api_endpoint,
+ cert_source,
+ ) = client_class.get_mtls_endpoint_and_cert_source()
+ assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
+ assert cert_source == mock_client_cert_source
+
+
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
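
The `get_mtls_endpoint_and_cert_source` test added above doubles as a contract statement for the new classmethod: it returns an `(endpoint, cert_source)` pair, preferring whatever the passed `ClientOptions` carries and otherwise resolving from the `GOOGLE_API_USE_CLIENT_CERTIFICATE` and `GOOGLE_API_USE_MTLS_ENDPOINT` environment variables. A hedged sketch of calling it directly ("example.endpoint" is a placeholder):

    from google.api_core import client_options as client_options_lib
    from google.cloud.bigtable_v2.services.bigtable import BigtableClient

    # With no options, the pair comes entirely from the environment.
    endpoint, cert_source = BigtableClient.get_mtls_endpoint_and_cert_source()

    # An explicit api_endpoint wins over the environment-derived default.
    options = client_options_lib.ClientOptions(api_endpoint="example.endpoint")
    endpoint, _ = BigtableClient.get_mtls_endpoint_and_cert_source(options)
    assert endpoint == "example.endpoint"
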
@@ -407,7 +467,7 @@ def test_bigtable_client_client_options_scopes(
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
@@ -421,20 +481,26 @@ def test_bigtable_client_client_options_scopes(
@pytest.mark.parametrize(
- "client_class,transport_class,transport_name",
+ "client_class,transport_class,transport_name,grpc_helpers",
[
- (BigtableClient, transports.BigtableGrpcTransport, "grpc"),
- (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"),
+ (BigtableClient, transports.BigtableGrpcTransport, "grpc", grpc_helpers),
+ (
+ BigtableAsyncClient,
+ transports.BigtableGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
],
)
def test_bigtable_client_client_options_credentials_file(
- client_class, transport_class, transport_name
+ client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
+
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class(client_options=options)
+ client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
@@ -465,7 +531,76 @@ def test_bigtable_client_client_options_from_dict():
)
-def test_read_rows(transport: str = "grpc", request_type=bigtable.ReadRowsRequest):
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (BigtableClient, transports.BigtableGrpcTransport, "grpc", grpc_helpers),
+ (
+ BigtableAsyncClient,
+ transports.BigtableGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_bigtable_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "bigtable.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=(
+ "https://www.googleapis.com/auth/bigtable.data",
+ "https://www.googleapis.com/auth/bigtable.data.readonly",
+ "https://www.googleapis.com/auth/cloud-bigtable.data",
+ "https://www.googleapis.com/auth/cloud-bigtable.data.readonly",
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ scopes=None,
+ default_host="bigtable.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize("request_type", [bigtable.ReadRowsRequest, dict,])
+def test_read_rows(request_type, transport: str = "grpc"):
client = BigtableClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -490,10 +625,6 @@ def test_read_rows(transport: str = "grpc", request_type=bigtable.ReadRowsReques
assert isinstance(message, bigtable.ReadRowsResponse)
-def test_read_rows_from_dict():
- test_read_rows(request_type=dict)
-
-
def test_read_rows_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -614,8 +745,12 @@ def test_read_rows_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].app_profile_id == "app_profile_id_value"
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
def test_read_rows_flattened_error():
@@ -651,8 +786,12 @@ async def test_read_rows_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].app_profile_id == "app_profile_id_value"
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -669,9 +808,8 @@ async def test_read_rows_flattened_error_async():
)
-def test_sample_row_keys(
- transport: str = "grpc", request_type=bigtable.SampleRowKeysRequest
-):
+@pytest.mark.parametrize("request_type", [bigtable.SampleRowKeysRequest, dict,])
+def test_sample_row_keys(request_type, transport: str = "grpc"):
client = BigtableClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -696,10 +834,6 @@ def test_sample_row_keys(
assert isinstance(message, bigtable.SampleRowKeysResponse)
-def test_sample_row_keys_from_dict():
- test_sample_row_keys(request_type=dict)
-
-
def test_sample_row_keys_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -820,8 +954,12 @@ def test_sample_row_keys_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].app_profile_id == "app_profile_id_value"
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
def test_sample_row_keys_flattened_error():
@@ -857,8 +995,12 @@ async def test_sample_row_keys_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].app_profile_id == "app_profile_id_value"
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -875,7 +1017,8 @@ async def test_sample_row_keys_flattened_error_async():
)
-def test_mutate_row(transport: str = "grpc", request_type=bigtable.MutateRowRequest):
+@pytest.mark.parametrize("request_type", [bigtable.MutateRowRequest, dict,])
+def test_mutate_row(request_type, transport: str = "grpc"):
client = BigtableClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -899,10 +1042,6 @@ def test_mutate_row(transport: str = "grpc", request_type=bigtable.MutateRowRequ
assert isinstance(response, bigtable.MutateRowResponse)
-def test_mutate_row_from_dict():
- test_mutate_row(request_type=dict)
-
-
def test_mutate_row_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1027,14 +1166,22 @@ def test_mutate_row_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].row_key == b"row_key_blob"
- assert args[0].mutations == [
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].row_key
+ mock_val = b"row_key_blob"
+ assert arg == mock_val
+ arg = args[0].mutations
+ mock_val = [
data.Mutation(
set_cell=data.Mutation.SetCell(family_name="family_name_value")
)
]
- assert args[0].app_profile_id == "app_profile_id_value"
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
def test_mutate_row_flattened_error():
@@ -1085,14 +1232,22 @@ async def test_mutate_row_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].row_key == b"row_key_blob"
- assert args[0].mutations == [
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].row_key
+ mock_val = b"row_key_blob"
+ assert arg == mock_val
+ arg = args[0].mutations
+ mock_val = [
data.Mutation(
set_cell=data.Mutation.SetCell(family_name="family_name_value")
)
]
- assert args[0].app_profile_id == "app_profile_id_value"
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1115,7 +1270,8 @@ async def test_mutate_row_flattened_error_async():
)
-def test_mutate_rows(transport: str = "grpc", request_type=bigtable.MutateRowsRequest):
+@pytest.mark.parametrize("request_type", [bigtable.MutateRowsRequest, dict,])
+def test_mutate_rows(request_type, transport: str = "grpc"):
client = BigtableClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1140,10 +1296,6 @@ def test_mutate_rows(transport: str = "grpc", request_type=bigtable.MutateRowsRe
assert isinstance(message, bigtable.MutateRowsResponse)
-def test_mutate_rows_from_dict():
- test_mutate_rows(request_type=dict)
-
-
def test_mutate_rows_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1266,11 +1418,15 @@ def test_mutate_rows_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].entries == [
- bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")
- ]
- assert args[0].app_profile_id == "app_profile_id_value"
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].entries
+ mock_val = [bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")]
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
def test_mutate_rows_flattened_error():
@@ -1309,11 +1465,15 @@ async def test_mutate_rows_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].entries == [
- bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")
- ]
- assert args[0].app_profile_id == "app_profile_id_value"
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].entries
+ mock_val = [bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")]
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1331,9 +1491,8 @@ async def test_mutate_rows_flattened_error_async():
)
-def test_check_and_mutate_row(
- transport: str = "grpc", request_type=bigtable.CheckAndMutateRowRequest
-):
+@pytest.mark.parametrize("request_type", [bigtable.CheckAndMutateRowRequest, dict,])
+def test_check_and_mutate_row(request_type, transport: str = "grpc"):
client = BigtableClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1360,10 +1519,6 @@ def test_check_and_mutate_row(
assert response.predicate_matched is True
-def test_check_and_mutate_row_from_dict():
- test_check_and_mutate_row(request_type=dict)
-
-
def test_check_and_mutate_row_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1515,9 +1670,14 @@ def test_check_and_mutate_row_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].row_key == b"row_key_blob"
- assert args[0].predicate_filter == data.RowFilter(
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].row_key
+ mock_val = b"row_key_blob"
+ assert arg == mock_val
+ arg = args[0].predicate_filter
+ mock_val = data.RowFilter(
chain=data.RowFilter.Chain(
filters=[
data.RowFilter(
@@ -1526,17 +1686,24 @@ def test_check_and_mutate_row_flattened():
]
)
)
- assert args[0].true_mutations == [
+ assert arg == mock_val
+ arg = args[0].true_mutations
+ mock_val = [
data.Mutation(
set_cell=data.Mutation.SetCell(family_name="family_name_value")
)
]
- assert args[0].false_mutations == [
+ assert arg == mock_val
+ arg = args[0].false_mutations
+ mock_val = [
data.Mutation(
set_cell=data.Mutation.SetCell(family_name="family_name_value")
)
]
- assert args[0].app_profile_id == "app_profile_id_value"
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
def test_check_and_mutate_row_flattened_error():
@@ -1621,9 +1788,14 @@ async def test_check_and_mutate_row_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].row_key == b"row_key_blob"
- assert args[0].predicate_filter == data.RowFilter(
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].row_key
+ mock_val = b"row_key_blob"
+ assert arg == mock_val
+ arg = args[0].predicate_filter
+ mock_val = data.RowFilter(
chain=data.RowFilter.Chain(
filters=[
data.RowFilter(
@@ -1632,17 +1804,24 @@ async def test_check_and_mutate_row_flattened_async():
]
)
)
- assert args[0].true_mutations == [
+ assert arg == mock_val
+ arg = args[0].true_mutations
+ mock_val = [
data.Mutation(
set_cell=data.Mutation.SetCell(family_name="family_name_value")
)
]
- assert args[0].false_mutations == [
+ assert arg == mock_val
+ arg = args[0].false_mutations
+ mock_val = [
data.Mutation(
set_cell=data.Mutation.SetCell(family_name="family_name_value")
)
]
- assert args[0].app_profile_id == "app_profile_id_value"
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1681,9 +1860,8 @@ async def test_check_and_mutate_row_flattened_error_async():
)
-def test_read_modify_write_row(
- transport: str = "grpc", request_type=bigtable.ReadModifyWriteRowRequest
-):
+@pytest.mark.parametrize("request_type", [bigtable.ReadModifyWriteRowRequest, dict,])
+def test_read_modify_write_row(request_type, transport: str = "grpc"):
client = BigtableClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
@@ -1709,10 +1887,6 @@ def test_read_modify_write_row(
assert isinstance(response, bigtable.ReadModifyWriteRowResponse)
-def test_read_modify_write_row_from_dict():
- test_read_modify_write_row(request_type=dict)
-
-
def test_read_modify_write_row_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
@@ -1843,12 +2017,18 @@ def test_read_modify_write_row_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].row_key == b"row_key_blob"
- assert args[0].rules == [
- data.ReadModifyWriteRule(family_name="family_name_value")
- ]
- assert args[0].app_profile_id == "app_profile_id_value"
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].row_key
+ mock_val = b"row_key_blob"
+ assert arg == mock_val
+ arg = args[0].rules
+ mock_val = [data.ReadModifyWriteRule(family_name="family_name_value")]
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
def test_read_modify_write_row_flattened_error():
@@ -1893,12 +2073,18 @@ async def test_read_modify_write_row_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0].table_name == "table_name_value"
- assert args[0].row_key == b"row_key_blob"
- assert args[0].rules == [
- data.ReadModifyWriteRule(family_name="family_name_value")
- ]
- assert args[0].app_profile_id == "app_profile_id_value"
+ arg = args[0].table_name
+ mock_val = "table_name_value"
+ assert arg == mock_val
+ arg = args[0].row_key
+ mock_val = b"row_key_blob"
+ assert arg == mock_val
+ arg = args[0].rules
+ mock_val = [data.ReadModifyWriteRule(family_name="family_name_value")]
+ assert arg == mock_val
+ arg = args[0].app_profile_id
+ mock_val = "app_profile_id_value"
+ assert arg == mock_val
@pytest.mark.asyncio
@@ -1937,6 +2123,23 @@ def test_credentials_transport_error():
transport=transport,
)
+ # It is an error to provide an api_key and a transport instance.
+ transport = transports.BigtableGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = BigtableClient(client_options=options, transport=transport,)
+
+ # It is an error to provide an api_key and a credential.
+ options = mock.Mock()
+ options.api_key = "api_key"
+ with pytest.raises(ValueError):
+ client = BigtableClient(
+ client_options=options, credentials=ga_credentials.AnonymousCredentials()
+ )
+
# It is an error to provide scopes and a transport instance.
transport = transports.BigtableGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
@@ -2022,8 +2225,10 @@ def test_bigtable_base_transport():
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
-@requires_google_auth_gte_1_25_0
def test_bigtable_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
@@ -2051,33 +2256,6 @@ def test_bigtable_base_transport_with_credentials_file():
)
-@requires_google_auth_lt_1_25_0
-def test_bigtable_base_transport_with_credentials_file_old_google_auth():
- # Instantiate the base transport with a credentials file
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch(
- "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages"
- ) as Transport:
- Transport.return_value = None
- load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
- transport = transports.BigtableTransport(
- credentials_file="credentials.json", quota_project_id="octopus",
- )
- load_creds.assert_called_once_with(
- "credentials.json",
- scopes=(
- "https://www.googleapis.com/auth/bigtable.data",
- "https://www.googleapis.com/auth/bigtable.data.readonly",
- "https://www.googleapis.com/auth/cloud-bigtable.data",
- "https://www.googleapis.com/auth/cloud-bigtable.data.readonly",
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- ),
- quota_project_id="octopus",
- )
-
-
def test_bigtable_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
@@ -2089,7 +2267,6 @@ def test_bigtable_base_transport_with_adc():
adc.assert_called_once()
-@requires_google_auth_gte_1_25_0
def test_bigtable_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
@@ -2109,30 +2286,10 @@ def test_bigtable_auth_adc():
)
-@requires_google_auth_lt_1_25_0
-def test_bigtable_auth_adc_old_google_auth():
- # If no credentials are provided, we should use ADC credentials.
- with mock.patch.object(google.auth, "default", autospec=True) as adc:
- adc.return_value = (ga_credentials.AnonymousCredentials(), None)
- BigtableClient()
- adc.assert_called_once_with(
- scopes=(
- "https://www.googleapis.com/auth/bigtable.data",
- "https://www.googleapis.com/auth/bigtable.data.readonly",
- "https://www.googleapis.com/auth/cloud-bigtable.data",
- "https://www.googleapis.com/auth/cloud-bigtable.data.readonly",
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- ),
- quota_project_id=None,
- )
-
-
@pytest.mark.parametrize(
"transport_class",
[transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport,],
)
-@requires_google_auth_gte_1_25_0
def test_bigtable_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
@@ -2153,30 +2310,6 @@ def test_bigtable_transport_auth_adc(transport_class):
)
-@pytest.mark.parametrize(
- "transport_class",
- [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport,],
-)
-@requires_google_auth_lt_1_25_0
-def test_bigtable_transport_auth_adc_old_google_auth(transport_class):
- # If credentials and host are not provided, the transport class should use
- # ADC credentials.
- with mock.patch.object(google.auth, "default", autospec=True) as adc:
- adc.return_value = (ga_credentials.AnonymousCredentials(), None)
- transport_class(quota_project_id="octopus")
- adc.assert_called_once_with(
- scopes=(
- "https://www.googleapis.com/auth/bigtable.data",
- "https://www.googleapis.com/auth/bigtable.data.readonly",
- "https://www.googleapis.com/auth/cloud-bigtable.data",
- "https://www.googleapis.com/auth/cloud-bigtable.data.readonly",
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- ),
- quota_project_id="octopus",
- )
-
-
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
@@ -2517,7 +2650,7 @@ def test_parse_common_location_path():
assert expected == actual
-def test_client_withDEFAULT_CLIENT_INFO():
+def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
@@ -2536,3 +2669,79 @@ def test_client_withDEFAULT_CLIENT_INFO():
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+ client = BigtableAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close():
+ transports = {
+ "grpc": "_grpc_channel",
+ }
+
+ for transport, close_name in transports.items():
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, close_name)), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "grpc",
+ ]
+ for transport in transports:
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class",
+ [
+ (BigtableClient, transports.BigtableGrpcTransport),
+ (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport),
+ ],
+)
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
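
Both test modules also gain the same close-on-exit coverage: clients now act as context managers, and leaving the block closes the underlying gRPC channel (the async client does the same under `async with`). A minimal usage sketch, using anonymous credentials purely for illustration:

    from google.auth import credentials as ga_credentials
    from google.cloud.bigtable_v2.services.bigtable import BigtableClient

    client = BigtableClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with client:
        pass  # RPCs issued here share one channel
    # On exit, client.transport.close() has run and the channel is shut down.
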
diff --git a/tests/unit/test_app_profile.py b/tests/unit/test_app_profile.py
index 6422e87e9..07c686fb8 100644
--- a/tests/unit/test_app_profile.py
+++ b/tests/unit/test_app_profile.py
@@ -12,650 +12,624 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-import unittest
-
import mock
+import pytest
from ._testing import _make_credentials
+PROJECT = "project"
+INSTANCE_ID = "instance-id"
+APP_PROFILE_ID = "app-profile-id"
+APP_PROFILE_NAME = "projects/{}/instances/{}/appProfiles/{}".format(
+ PROJECT, INSTANCE_ID, APP_PROFILE_ID
+)
+CLUSTER_ID = "cluster-id"
+OP_ID = 8765
+OP_NAME = "operations/projects/{}/instances/{}/appProfiles/{}/operations/{}".format(
+ PROJECT, INSTANCE_ID, APP_PROFILE_ID, OP_ID
+)
+
+
+def _make_app_profile(*args, **kwargs):
+ from google.cloud.bigtable.app_profile import AppProfile
+
+ return AppProfile(*args, **kwargs)
+
+
+def _make_client(*args, **kwargs):
+ from google.cloud.bigtable.client import Client
+
+ return Client(*args, **kwargs)
+
+
+def test_app_profile_constructor_defaults():
+ from google.cloud.bigtable.app_profile import AppProfile
+
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+
+ app_profile = _make_app_profile(APP_PROFILE_ID, instance)
+ assert isinstance(app_profile, AppProfile)
+ assert app_profile._instance == instance
+ assert app_profile.routing_policy_type is None
+ assert app_profile.description is None
+ assert app_profile.cluster_id is None
+ assert app_profile.allow_transactional_writes is None
+
+
+def test_app_profile_constructor_explicit():
+ from google.cloud.bigtable.enums import RoutingPolicyType
+
+ ANY = RoutingPolicyType.ANY
+ DESCRIPTION_1 = "routing policy any"
+ APP_PROFILE_ID_2 = "app-profile-id-2"
+ SINGLE = RoutingPolicyType.SINGLE
+ DESCRIPTION_2 = "routing policy single"
+ ALLOW_WRITES = True
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+
+ app_profile1 = _make_app_profile(
+ APP_PROFILE_ID, instance, routing_policy_type=ANY, description=DESCRIPTION_1,
+ )
+ app_profile2 = _make_app_profile(
+ APP_PROFILE_ID_2,
+ instance,
+ routing_policy_type=SINGLE,
+ description=DESCRIPTION_2,
+ cluster_id=CLUSTER_ID,
+ allow_transactional_writes=ALLOW_WRITES,
+ )
+ assert app_profile1.app_profile_id == APP_PROFILE_ID
+ assert app_profile1._instance is instance
+ assert app_profile1.routing_policy_type == ANY
+ assert app_profile1.description == DESCRIPTION_1
+ assert app_profile2.app_profile_id == APP_PROFILE_ID_2
+ assert app_profile2._instance is instance
+ assert app_profile2.routing_policy_type == SINGLE
+ assert app_profile2.description == DESCRIPTION_2
+ assert app_profile2.cluster_id == CLUSTER_ID
+ assert app_profile2.allow_transactional_writes == ALLOW_WRITES
+
+
+def test_app_profile_name():
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _Instance(INSTANCE_ID, client)
+
+ app_profile = _make_app_profile(APP_PROFILE_ID, instance)
+ assert app_profile.name == APP_PROFILE_NAME
+
+
+def test_app_profile___eq__():
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+ app_profile1 = _make_app_profile(APP_PROFILE_ID, instance)
+ app_profile2 = _make_app_profile(APP_PROFILE_ID, instance)
+ assert app_profile1 == app_profile2
+
+
+def test_app_profile___eq___w_type_instance_differ():
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+ alt_instance = _Instance("other-instance", client)
+ other_object = _Other(APP_PROFILE_ID, instance)
+ app_profile1 = _make_app_profile(APP_PROFILE_ID, instance)
+ app_profile2 = _make_app_profile(APP_PROFILE_ID, alt_instance)
+ assert not (app_profile1 == other_object)
+ assert not (app_profile1 == app_profile2)
+
+
+def test_app_profile___ne___w_same_value():
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+ app_profile1 = _make_app_profile(APP_PROFILE_ID, instance)
+ app_profile2 = _make_app_profile(APP_PROFILE_ID, instance)
+ assert not (app_profile1 != app_profile2)
+
+
+def test_app_profile___ne__():
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+ app_profile1 = _make_app_profile("app_profile_id1", instance)
+ app_profile2 = _make_app_profile("app_profile_id2", instance)
+ assert app_profile1 != app_profile2
+
+
+def test_app_profile_from_pb_success_w_routing_any():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.app_profile import AppProfile
+ from google.cloud.bigtable.enums import RoutingPolicyType
+
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+
+ description = "routing any"
+ routing = RoutingPolicyType.ANY
+ multi_cluster_routing_use_any = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny()
+
+ app_profile_pb = data_v2_pb2.AppProfile(
+ name=APP_PROFILE_NAME,
+ description=description,
+ multi_cluster_routing_use_any=multi_cluster_routing_use_any,
+ )
+
+ app_profile = AppProfile.from_pb(app_profile_pb, instance)
+ assert isinstance(app_profile, AppProfile)
+ assert app_profile._instance is instance
+ assert app_profile.app_profile_id == APP_PROFILE_ID
+ assert app_profile.description == description
+ assert app_profile.routing_policy_type == routing
+ assert app_profile.cluster_id is None
+ assert app_profile.allow_transactional_writes is False
+
+
+def test_app_profile_from_pb_success_w_routing_single():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.app_profile import AppProfile
+ from google.cloud.bigtable.enums import RoutingPolicyType
+
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+
+ description = "routing single"
+ allow_transactional_writes = True
+ routing = RoutingPolicyType.SINGLE
+ single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting(
+ cluster_id=CLUSTER_ID, allow_transactional_writes=allow_transactional_writes,
+ )
+
+ app_profile_pb = data_v2_pb2.AppProfile(
+ name=APP_PROFILE_NAME,
+ description=description,
+ single_cluster_routing=single_cluster_routing,
+ )
+
+ app_profile = AppProfile.from_pb(app_profile_pb, instance)
+ assert isinstance(app_profile, AppProfile)
+ assert app_profile._instance is instance
+ assert app_profile.app_profile_id == APP_PROFILE_ID
+ assert app_profile.description == description
+ assert app_profile.routing_policy_type == routing
+ assert app_profile.cluster_id == CLUSTER_ID
+ assert app_profile.allow_transactional_writes == allow_transactional_writes
+
+
+def test_app_profile_from_pb_w_bad_app_profile_name():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.app_profile import AppProfile
+
+ bad_app_profile_name = "BAD_NAME"
+
+ app_profile_pb = data_v2_pb2.AppProfile(name=bad_app_profile_name)
+
+ with pytest.raises(ValueError):
+ AppProfile.from_pb(app_profile_pb, None)
+
+
+def test_app_profile_from_pb_w_instance_id_mismatch():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.app_profile import AppProfile
+
+ ALT_INSTANCE_ID = "ALT_INSTANCE_ID"
+ client = _Client(PROJECT)
+ instance = _Instance(ALT_INSTANCE_ID, client)
+ assert instance.instance_id == ALT_INSTANCE_ID
+
+ app_profile_pb = data_v2_pb2.AppProfile(name=APP_PROFILE_NAME)
+
+ with pytest.raises(ValueError):
+ AppProfile.from_pb(app_profile_pb, instance)
+
+
+def test_app_profile_from_pb_w_project_mismatch():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.app_profile import AppProfile
+
+ ALT_PROJECT = "ALT_PROJECT"
+ client = _Client(project=ALT_PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+ assert client.project == ALT_PROJECT
+
+ app_profile_pb = data_v2_pb2.AppProfile(name=APP_PROFILE_NAME)
+
+ with pytest.raises(ValueError):
+ AppProfile.from_pb(app_profile_pb, instance)
+
+
+def test_app_profile_reload_w_routing_any():
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.enums import RoutingPolicyType
+
+ api = mock.create_autospec(BigtableInstanceAdminClient)
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _Instance(INSTANCE_ID, client)
+
+ routing = RoutingPolicyType.ANY
+ description = "routing policy any"
+
+ app_profile = _make_app_profile(
+ APP_PROFILE_ID, instance, routing_policy_type=routing, description=description,
+ )
+
+ # Create response_pb
+ description_from_server = "routing policy switched to single"
+ cluster_id_from_server = CLUSTER_ID
+ allow_transactional_writes = True
+ single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting(
+ cluster_id=cluster_id_from_server,
+ allow_transactional_writes=allow_transactional_writes,
+ )
+
+ response_pb = data_v2_pb2.AppProfile(
+ name=app_profile.name,
+ single_cluster_routing=single_cluster_routing,
+ description=description_from_server,
+ )
+
+ # Patch the stub used by the API method.
+ client._instance_admin_client = api
+ instance_stub = client._instance_admin_client
+ instance_stub.get_app_profile.side_effect = [response_pb]
+
+ # Create expected_result.
+ expected_result = None # reload() has no return value.
-class TestAppProfile(unittest.TestCase):
-
- PROJECT = "project"
- INSTANCE_ID = "instance-id"
- APP_PROFILE_ID = "app-profile-id"
- APP_PROFILE_NAME = "projects/{}/instances/{}/appProfiles/{}".format(
- PROJECT, INSTANCE_ID, APP_PROFILE_ID
- )
- CLUSTER_ID = "cluster-id"
- OP_ID = 8765
- OP_NAME = "operations/projects/{}/instances/{}/appProfiles/{}/operations/{}".format(
- PROJECT, INSTANCE_ID, APP_PROFILE_ID, OP_ID
- )
-
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.app_profile import AppProfile
-
- return AppProfile
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- @staticmethod
- def _get_target_client_class():
- from google.cloud.bigtable.client import Client
-
- return Client
-
- def _make_client(self, *args, **kwargs):
- return self._get_target_client_class()(*args, **kwargs)
-
- def test_constructor_defaults(self):
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
-
- app_profile = self._make_one(self.APP_PROFILE_ID, instance)
- self.assertIsInstance(app_profile, self._get_target_class())
- self.assertEqual(app_profile._instance, instance)
- self.assertIsNone(app_profile.routing_policy_type)
- self.assertIsNone(app_profile.description)
- self.assertIsNone(app_profile.cluster_id)
- self.assertIsNone(app_profile.allow_transactional_writes)
-
- def test_constructor_non_defaults(self):
- from google.cloud.bigtable.enums import RoutingPolicyType
-
- ANY = RoutingPolicyType.ANY
- DESCRIPTION_1 = "routing policy any"
- APP_PROFILE_ID_2 = "app-profile-id-2"
- SINGLE = RoutingPolicyType.SINGLE
- DESCRIPTION_2 = "routing policy single"
- ALLOW_WRITES = True
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
-
- app_profile1 = self._make_one(
- self.APP_PROFILE_ID,
- instance,
- routing_policy_type=ANY,
- description=DESCRIPTION_1,
- )
- app_profile2 = self._make_one(
- APP_PROFILE_ID_2,
- instance,
- routing_policy_type=SINGLE,
- description=DESCRIPTION_2,
- cluster_id=self.CLUSTER_ID,
- allow_transactional_writes=ALLOW_WRITES,
- )
- self.assertEqual(app_profile1.app_profile_id, self.APP_PROFILE_ID)
- self.assertIs(app_profile1._instance, instance)
- self.assertEqual(app_profile1.routing_policy_type, ANY)
- self.assertEqual(app_profile1.description, DESCRIPTION_1)
- self.assertEqual(app_profile2.app_profile_id, APP_PROFILE_ID_2)
- self.assertIs(app_profile2._instance, instance)
- self.assertEqual(app_profile2.routing_policy_type, SINGLE)
- self.assertEqual(app_profile2.description, DESCRIPTION_2)
- self.assertEqual(app_profile2.cluster_id, self.CLUSTER_ID)
- self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES)
-
- def test_name_property(self):
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = _Instance(self.INSTANCE_ID, client)
-
- app_profile = self._make_one(self.APP_PROFILE_ID, instance)
- self.assertEqual(app_profile.name, self.APP_PROFILE_NAME)
-
- def test___eq__(self):
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
- app_profile1 = self._make_one(self.APP_PROFILE_ID, instance)
- app_profile2 = self._make_one(self.APP_PROFILE_ID, instance)
- self.assertTrue(app_profile1 == app_profile2)
-
- def test___eq__type_instance_differ(self):
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
- alt_instance = _Instance("other-instance", client)
- other_object = _Other(self.APP_PROFILE_ID, instance)
- app_profile1 = self._make_one(self.APP_PROFILE_ID, instance)
- app_profile2 = self._make_one(self.APP_PROFILE_ID, alt_instance)
- self.assertFalse(app_profile1 == other_object)
- self.assertFalse(app_profile1 == app_profile2)
-
- def test___ne__same_value(self):
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
- app_profile1 = self._make_one(self.APP_PROFILE_ID, instance)
- app_profile2 = self._make_one(self.APP_PROFILE_ID, instance)
- self.assertFalse(app_profile1 != app_profile2)
-
- def test___ne__(self):
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
- app_profile1 = self._make_one("app_profile_id1", instance)
- app_profile2 = self._make_one("app_profile_id2", instance)
- self.assertTrue(app_profile1 != app_profile2)
-
- def test_from_pb_success_routing_any(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable.enums import RoutingPolicyType
-
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
-
- desctiption = "routing any"
- routing = RoutingPolicyType.ANY
- multi_cluster_routing_use_any = (
- data_v2_pb2.AppProfile.MultiClusterRoutingUseAny()
- )
-
- app_profile_pb = data_v2_pb2.AppProfile(
- name=self.APP_PROFILE_NAME,
- description=desctiption,
- multi_cluster_routing_use_any=multi_cluster_routing_use_any,
- )
-
- klass = self._get_target_class()
- app_profile = klass.from_pb(app_profile_pb, instance)
- self.assertIsInstance(app_profile, klass)
- self.assertIs(app_profile._instance, instance)
- self.assertEqual(app_profile.app_profile_id, self.APP_PROFILE_ID)
- self.assertEqual(app_profile.description, desctiption)
- self.assertEqual(app_profile.routing_policy_type, routing)
- self.assertIsNone(app_profile.cluster_id)
- self.assertEqual(app_profile.allow_transactional_writes, False)
-
- def test_from_pb_success_routing_single(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable.enums import RoutingPolicyType
-
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
-
- desctiption = "routing single"
- allow_transactional_writes = True
- routing = RoutingPolicyType.SINGLE
- single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting(
- cluster_id=self.CLUSTER_ID,
- allow_transactional_writes=allow_transactional_writes,
- )
-
- app_profile_pb = data_v2_pb2.AppProfile(
- name=self.APP_PROFILE_NAME,
- description=desctiption,
- single_cluster_routing=single_cluster_routing,
- )
-
- klass = self._get_target_class()
- app_profile = klass.from_pb(app_profile_pb, instance)
- self.assertIsInstance(app_profile, klass)
- self.assertIs(app_profile._instance, instance)
- self.assertEqual(app_profile.app_profile_id, self.APP_PROFILE_ID)
- self.assertEqual(app_profile.description, desctiption)
- self.assertEqual(app_profile.routing_policy_type, routing)
- self.assertEqual(app_profile.cluster_id, self.CLUSTER_ID)
- self.assertEqual(
- app_profile.allow_transactional_writes, allow_transactional_writes
- )
-
- def test_from_pb_bad_app_profile_name(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
-
- bad_app_profile_name = "BAD_NAME"
-
- app_profile_pb = data_v2_pb2.AppProfile(name=bad_app_profile_name)
-
- klass = self._get_target_class()
- with self.assertRaises(ValueError):
- klass.from_pb(app_profile_pb, None)
-
- def test_from_pb_instance_id_mistmatch(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
-
- ALT_INSTANCE_ID = "ALT_INSTANCE_ID"
- client = _Client(self.PROJECT)
- instance = _Instance(ALT_INSTANCE_ID, client)
- self.assertEqual(instance.instance_id, ALT_INSTANCE_ID)
-
- app_profile_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME)
-
- klass = self._get_target_class()
- with self.assertRaises(ValueError):
- klass.from_pb(app_profile_pb, instance)
-
- def test_from_pb_project_mistmatch(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
-
- ALT_PROJECT = "ALT_PROJECT"
- client = _Client(project=ALT_PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
- self.assertEqual(client.project, ALT_PROJECT)
-
- app_profile_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME)
-
- klass = self._get_target_class()
- with self.assertRaises(ValueError):
- klass.from_pb(app_profile_pb, instance)
-
- def test_reload_routing_any(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable.enums import RoutingPolicyType
-
- api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = _Instance(self.INSTANCE_ID, client)
-
- routing = RoutingPolicyType.ANY
- description = "routing policy any"
-
- app_profile = self._make_one(
- self.APP_PROFILE_ID,
- instance,
- routing_policy_type=routing,
- description=description,
- )
-
- # Create response_pb
- description_from_server = "routing policy switched to single"
- cluster_id_from_server = self.CLUSTER_ID
- allow_transactional_writes = True
- single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting(
- cluster_id=cluster_id_from_server,
- allow_transactional_writes=allow_transactional_writes,
- )
-
- response_pb = data_v2_pb2.AppProfile(
- name=app_profile.name,
- single_cluster_routing=single_cluster_routing,
- description=description_from_server,
- )
-
- # Patch the stub used by the API method.
- client._instance_admin_client = api
- instance_stub = client._instance_admin_client
- instance_stub.get_app_profile.side_effect = [response_pb]
-
- # Create expected_result.
- expected_result = None # reload() has no return value.
-
- # Check app_profile config values before.
- self.assertEqual(app_profile.routing_policy_type, routing)
- self.assertEqual(app_profile.description, description)
- self.assertIsNone(app_profile.cluster_id)
- self.assertIsNone(app_profile.allow_transactional_writes)
-
- # Perform the method and check the result.
- result = app_profile.reload()
- self.assertEqual(result, expected_result)
- self.assertEqual(app_profile.routing_policy_type, RoutingPolicyType.SINGLE)
- self.assertEqual(app_profile.description, description_from_server)
- self.assertEqual(app_profile.cluster_id, cluster_id_from_server)
- self.assertEqual(
- app_profile.allow_transactional_writes, allow_transactional_writes
- )
-
- def test_exists(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.api_core import exceptions
-
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = client.instance(self.INSTANCE_ID)
-
- # Create response_pb
- response_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME)
- client._instance_admin_client = instance_api
-
- # Patch the stub used by the API method.
- client._instance_admin_client = instance_api
- instance_stub = client._instance_admin_client
- instance_stub.get_app_profile.side_effect = [
- response_pb,
- exceptions.NotFound("testing"),
- exceptions.BadRequest("testing"),
- ]
-
- # Perform the method and check the result.
- non_existing_app_profile_id = "other-app-profile-id"
- app_profile = self._make_one(self.APP_PROFILE_ID, instance)
- alt_app_profile = self._make_one(non_existing_app_profile_id, instance)
- self.assertTrue(app_profile.exists())
- self.assertFalse(alt_app_profile.exists())
- with self.assertRaises(exceptions.BadRequest):
- alt_app_profile.exists()
-
- def test_create_routing_any(self):
- from google.cloud.bigtable.enums import RoutingPolicyType
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = client.instance(self.INSTANCE_ID)
-
- routing = RoutingPolicyType.ANY
- description = "routing policy any"
- ignore_warnings = True
-
- app_profile = self._make_one(
- self.APP_PROFILE_ID,
- instance,
- routing_policy_type=routing,
- description=description,
- )
-
- expected_request_app_profile = app_profile._to_pb()
- name = instance.name
- expected_request = {
- "request": {
- "parent": name,
- "app_profile_id": self.APP_PROFILE_ID,
- "app_profile": expected_request_app_profile,
- "ignore_warnings": ignore_warnings,
- }
+ # Check app_profile config values before.
+ assert app_profile.routing_policy_type == routing
+ assert app_profile.description == description
+ assert app_profile.cluster_id is None
+ assert app_profile.allow_transactional_writes is None
+
+ # Perform the method and check the result.
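+    # reload() fetches the server-side copy and overwrites the local
+    # configuration in place, which the assertions below confirm.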
+ result = app_profile.reload()
+ assert result == expected_result
+ assert app_profile.routing_policy_type == RoutingPolicyType.SINGLE
+ assert app_profile.description == description_from_server
+ assert app_profile.cluster_id == cluster_id_from_server
+ assert app_profile.allow_transactional_writes == allow_transactional_writes
+
+
+def test_app_profile_exists():
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.api_core import exceptions
+
+ instance_api = mock.create_autospec(BigtableInstanceAdminClient)
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ # Create response_pb
+ response_pb = data_v2_pb2.AppProfile(name=APP_PROFILE_NAME)
+
+ # Patch the stub used by the API method.
+ client._instance_admin_client = instance_api
+ instance_stub = client._instance_admin_client
+ instance_stub.get_app_profile.side_effect = [
+ response_pb,
+ exceptions.NotFound("testing"),
+ exceptions.BadRequest("testing"),
+ ]
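+    # The queued side effects are consumed in order: a hit (exists() is True),
+    # NotFound (exists() is False), then BadRequest (re-raised to the caller).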
+
+ # Perform the method and check the result.
+ non_existing_app_profile_id = "other-app-profile-id"
+ app_profile = _make_app_profile(APP_PROFILE_ID, instance)
+ alt_app_profile = _make_app_profile(non_existing_app_profile_id, instance)
+ assert app_profile.exists()
+ assert not alt_app_profile.exists()
+ with pytest.raises(exceptions.BadRequest):
+ alt_app_profile.exists()
+
+
+def test_app_profile_create_w_routing_any():
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+ from google.cloud.bigtable.app_profile import AppProfile
+ from google.cloud.bigtable.enums import RoutingPolicyType
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ routing = RoutingPolicyType.ANY
+ description = "routing policy any"
+ ignore_warnings = True
+
+ app_profile = _make_app_profile(
+ APP_PROFILE_ID, instance, routing_policy_type=routing, description=description,
+ )
+
+ expected_request_app_profile = app_profile._to_pb()
+ name = instance.name
+ expected_request = {
+ "request": {
+ "parent": name,
+ "app_profile_id": APP_PROFILE_ID,
+ "app_profile": expected_request_app_profile,
+ "ignore_warnings": ignore_warnings,
}
+ }
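+    # Comparing this dict against the recorded keyword arguments later
+    # verifies parent, app_profile_id, payload, and ignore_warnings at once.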
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- instance_api.app_profile_path.return_value = (
- "projects/project/instances/instance-id/appProfiles/app-profile-id"
- )
- instance_api.instance_path.return_value = name
- instance_api.create_app_profile.return_value = expected_request_app_profile
-
- # Patch the stub used by the API method.
- client._instance_admin_client = instance_api
- app_profile._instance._client._instance_admin_client = instance_api
- # Perform the method and check the result.
- result = app_profile.create(ignore_warnings)
-
- actual_request = client._instance_admin_client.create_app_profile.call_args_list[
- 0
- ].kwargs
-
- self.assertEqual(actual_request, expected_request)
- self.assertIsInstance(result, self._get_target_class())
- self.assertEqual(result.app_profile_id, self.APP_PROFILE_ID)
- self.assertIs(result._instance, instance)
- self.assertEqual(result.routing_policy_type, routing)
- self.assertEqual(result.description, description)
- self.assertEqual(result.allow_transactional_writes, False)
- self.assertIsNone(result.cluster_id)
-
- def test_create_routing_single(self):
- from google.cloud.bigtable.enums import RoutingPolicyType
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = client.instance(self.INSTANCE_ID)
-
- routing = RoutingPolicyType.SINGLE
- description = "routing policy single"
- allow_writes = False
- ignore_warnings = True
-
- app_profile = self._make_one(
- self.APP_PROFILE_ID,
- instance,
- routing_policy_type=routing,
- description=description,
- cluster_id=self.CLUSTER_ID,
- allow_transactional_writes=allow_writes,
- )
- expected_request_app_profile = app_profile._to_pb()
- instance_name = instance.name
- expected_request = {
- "request": {
- "parent": instance_name,
- "app_profile_id": self.APP_PROFILE_ID,
- "app_profile": expected_request_app_profile,
- "ignore_warnings": ignore_warnings,
- }
+ instance_api = mock.create_autospec(BigtableInstanceAdminClient)
+ instance_api.app_profile_path.return_value = (
+ "projects/project/instances/instance-id/appProfiles/app-profile-id"
+ )
+ instance_api.instance_path.return_value = name
+ instance_api.create_app_profile.return_value = expected_request_app_profile
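+    # Feeding the request proto back as the stubbed response makes the
+    # returned AppProfile mirror the submitted configuration.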
+
+ # Patch the stub used by the API method.
+ client._instance_admin_client = instance_api
+ app_profile._instance._client._instance_admin_client = instance_api
+ # Perform the method and check the result.
+ result = app_profile.create(ignore_warnings)
+
+ actual_request = client._instance_admin_client.create_app_profile.call_args_list[
+ 0
+ ].kwargs
+
+ assert actual_request == expected_request
+ assert isinstance(result, AppProfile)
+ assert result.app_profile_id == APP_PROFILE_ID
+ assert result._instance is instance
+ assert result.routing_policy_type == routing
+ assert result.description == description
+ assert result.allow_transactional_writes is False
+ assert result.cluster_id is None
+
+
+def test_app_profile_create_w_routing_single():
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+ from google.cloud.bigtable.app_profile import AppProfile
+ from google.cloud.bigtable.enums import RoutingPolicyType
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ routing = RoutingPolicyType.SINGLE
+ description = "routing policy single"
+ allow_writes = False
+ ignore_warnings = True
+
+ app_profile = _make_app_profile(
+ APP_PROFILE_ID,
+ instance,
+ routing_policy_type=routing,
+ description=description,
+ cluster_id=CLUSTER_ID,
+ allow_transactional_writes=allow_writes,
+ )
+ expected_request_app_profile = app_profile._to_pb()
+ instance_name = instance.name
+ expected_request = {
+ "request": {
+ "parent": instance_name,
+ "app_profile_id": APP_PROFILE_ID,
+ "app_profile": expected_request_app_profile,
+ "ignore_warnings": ignore_warnings,
}
+ }
+
+ # Patch the stub used by the API method.
+ instance_api = mock.create_autospec(BigtableInstanceAdminClient)
+ instance_api.app_profile_path.return_value = (
+ "projects/project/instances/instance-id/appProfiles/app-profile-id"
+ )
+ instance_api.instance_path.return_value = instance_name
+ instance_api.create_app_profile.return_value = expected_request_app_profile
+ client._instance_admin_client = instance_api
+ # Perform the method and check the result.
+ result = app_profile.create(ignore_warnings)
+
+ actual_request = client._instance_admin_client.create_app_profile.call_args_list[
+ 0
+ ].kwargs
+
+ assert actual_request == expected_request
+ assert isinstance(result, AppProfile)
+ assert result.app_profile_id == APP_PROFILE_ID
+ assert result._instance is instance
+ assert result.routing_policy_type == routing
+ assert result.description == description
+ assert result.allow_transactional_writes == allow_writes
+ assert result.cluster_id == CLUSTER_ID
+
+
+def test_app_profile_create_w_wrong_routing_policy():
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = client.instance(INSTANCE_ID)
+ app_profile = _make_app_profile(APP_PROFILE_ID, instance, routing_policy_type=None)
+ with pytest.raises(ValueError):
+ app_profile.create()
+
+
+def test_app_profile_update_w_routing_any():
+ from google.longrunning import operations_pb2
+ from google.protobuf.any_pb2 import Any
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_instance_admin as messages_v2_pb2,
+ )
+ from google.cloud.bigtable.enums import RoutingPolicyType
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+ from google.protobuf import field_mask_pb2
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ routing = RoutingPolicyType.SINGLE
+ description = "to routing policy single"
+ allow_writes = True
+ app_profile = _make_app_profile(
+ APP_PROFILE_ID,
+ instance,
+ routing_policy_type=routing,
+ description=description,
+ cluster_id=CLUSTER_ID,
+ allow_transactional_writes=allow_writes,
+ )
+
+ # Create response_pb
+ metadata = messages_v2_pb2.UpdateAppProfileMetadata()
+ type_url = "type.googleapis.com/{}".format(
+ messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name
+ )
+ response_pb = operations_pb2.Operation(
+ name=OP_NAME,
+ metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
+ )
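+    # update() returns a long-running operation; packing UpdateAppProfileMetadata
+    # into the Any payload is what the type_url assertion below checks.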
+
+ # Patch the stub used by the API method.
+ instance_api = mock.create_autospec(BigtableInstanceAdminClient)
+    # Mock API calls
+ instance_api.app_profile_path.return_value = (
+ "projects/project/instances/instance-id/appProfiles/app-profile-id"
+ )
+
+ client._instance_admin_client = instance_api
+
+ # Perform the method and check the result.
+ ignore_warnings = True
+ expected_request_update_mask = field_mask_pb2.FieldMask(
+ paths=["description", "single_cluster_routing"]
+ )
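+    # The profile carries single-cluster routing, so the expected mask names
+    # both the description and the single_cluster_routing submessage.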
- # Patch the stub used by the API method.
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- instance_api.app_profile_path.return_value = (
- "projects/project/instances/instance-id/appProfiles/app-profile-id"
- )
- instance_api.instance_path.return_value = instance_name
- instance_api.create_app_profile.return_value = expected_request_app_profile
- client._instance_admin_client = instance_api
- # Perform the method and check the result.
- result = app_profile.create(ignore_warnings)
-
- actual_request = client._instance_admin_client.create_app_profile.call_args_list[
- 0
- ].kwargs
-
- self.assertEqual(actual_request, expected_request)
- self.assertIsInstance(result, self._get_target_class())
- self.assertEqual(result.app_profile_id, self.APP_PROFILE_ID)
- self.assertIs(result._instance, instance)
- self.assertEqual(result.routing_policy_type, routing)
- self.assertEqual(result.description, description)
- self.assertEqual(result.allow_transactional_writes, allow_writes)
- self.assertEqual(result.cluster_id, self.CLUSTER_ID)
-
- def test_create_app_profile_with_wrong_routing_policy(self):
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = client.instance(self.INSTANCE_ID)
- app_profile = self._make_one(
- self.APP_PROFILE_ID, instance, routing_policy_type=None
- )
- with self.assertRaises(ValueError):
- app_profile.create()
-
- def test_update_app_profile_routing_any(self):
- from google.longrunning import operations_pb2
- from google.protobuf.any_pb2 import Any
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_instance_admin as messages_v2_pb2,
- )
- from google.cloud.bigtable.enums import RoutingPolicyType
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.protobuf import field_mask_pb2
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = client.instance(self.INSTANCE_ID)
-
- routing = RoutingPolicyType.SINGLE
- description = "to routing policy single"
- allow_writes = True
- app_profile = self._make_one(
- self.APP_PROFILE_ID,
- instance,
- routing_policy_type=routing,
- description=description,
- cluster_id=self.CLUSTER_ID,
- allow_transactional_writes=allow_writes,
- )
-
- # Create response_pb
- metadata = messages_v2_pb2.UpdateAppProfileMetadata()
- type_url = "type.googleapis.com/{}".format(
- messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name
- )
- response_pb = operations_pb2.Operation(
- name=self.OP_NAME,
- metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
- )
-
- # Patch the stub used by the API method.
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- # Mock api calls
- instance_api.app_profile_path.return_value = (
- "projects/project/instances/instance-id/appProfiles/app-profile-id"
- )
-
- client._instance_admin_client = instance_api
-
- # Perform the method and check the result.
- ignore_warnings = True
- expected_request_update_mask = field_mask_pb2.FieldMask(
- paths=["description", "single_cluster_routing"]
- )
-
- expected_request = {
- "request": {
- "app_profile": app_profile._to_pb(),
- "update_mask": expected_request_update_mask,
- "ignore_warnings": ignore_warnings,
- }
+ expected_request = {
+ "request": {
+ "app_profile": app_profile._to_pb(),
+ "update_mask": expected_request_update_mask,
+ "ignore_warnings": ignore_warnings,
}
+ }
+
+ instance_api.update_app_profile.return_value = response_pb
+ app_profile._instance._client._instance_admin_client = instance_api
+ result = app_profile.update(ignore_warnings=ignore_warnings)
+ actual_request = client._instance_admin_client.update_app_profile.call_args_list[
+ 0
+ ].kwargs
+
+ assert actual_request == expected_request
+ assert (
+ result.metadata.type_url
+ == "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata"
+ )
+
- instance_api.update_app_profile.return_value = response_pb
- app_profile._instance._client._instance_admin_client = instance_api
- result = app_profile.update(ignore_warnings=ignore_warnings)
- actual_request = client._instance_admin_client.update_app_profile.call_args_list[
- 0
- ].kwargs
-
- self.assertEqual(actual_request, expected_request)
- self.assertEqual(
- result.metadata.type_url,
- "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata",
- )
-
- def test_update_app_profile_routing_single(self):
- from google.longrunning import operations_pb2
- from google.protobuf.any_pb2 import Any
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_instance_admin as messages_v2_pb2,
- )
- from google.cloud.bigtable.enums import RoutingPolicyType
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.protobuf import field_mask_pb2
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = client.instance(self.INSTANCE_ID)
-
- routing = RoutingPolicyType.ANY
- app_profile = self._make_one(
- self.APP_PROFILE_ID, instance, routing_policy_type=routing
- )
-
- # Create response_pb
- metadata = messages_v2_pb2.UpdateAppProfileMetadata()
- type_url = "type.googleapis.com/{}".format(
- messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name
- )
- response_pb = operations_pb2.Operation(
- name=self.OP_NAME,
- metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
- )
-
- # Patch the stub used by the API method.
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- # Mock api calls
- instance_api.app_profile_path.return_value = (
- "projects/project/instances/instance-id/appProfiles/app-profile-id"
- )
- client._instance_admin_client = instance_api
- client._instance_admin_client.update_app_profile.return_value = response_pb
- # Perform the method and check the result.
- ignore_warnings = True
- expected_request_update_mask = field_mask_pb2.FieldMask(
- paths=["multi_cluster_routing_use_any"]
- )
- expected_request = {
- "request": {
- "app_profile": app_profile._to_pb(),
- "update_mask": expected_request_update_mask,
- "ignore_warnings": ignore_warnings,
- }
+def test_app_profile_update_w_routing_single():
+ from google.longrunning import operations_pb2
+ from google.protobuf.any_pb2 import Any
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_instance_admin as messages_v2_pb2,
+ )
+ from google.cloud.bigtable.enums import RoutingPolicyType
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+ from google.protobuf import field_mask_pb2
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = client.instance(INSTANCE_ID)
+
+ routing = RoutingPolicyType.ANY
+ app_profile = _make_app_profile(
+ APP_PROFILE_ID, instance, routing_policy_type=routing
+ )
+
+ # Create response_pb
+ metadata = messages_v2_pb2.UpdateAppProfileMetadata()
+ type_url = "type.googleapis.com/{}".format(
+ messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name
+ )
+ response_pb = operations_pb2.Operation(
+ name=OP_NAME,
+ metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
+ )
+
+ # Patch the stub used by the API method.
+ instance_api = mock.create_autospec(BigtableInstanceAdminClient)
+    # Mock API calls
+ instance_api.app_profile_path.return_value = (
+ "projects/project/instances/instance-id/appProfiles/app-profile-id"
+ )
+ client._instance_admin_client = instance_api
+ client._instance_admin_client.update_app_profile.return_value = response_pb
+ # Perform the method and check the result.
+ ignore_warnings = True
+ expected_request_update_mask = field_mask_pb2.FieldMask(
+ paths=["multi_cluster_routing_use_any"]
+ )
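+    # With multi-cluster (ANY) routing and no other fields set, only
+    # multi_cluster_routing_use_any belongs in the update mask.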
+ expected_request = {
+ "request": {
+ "app_profile": app_profile._to_pb(),
+ "update_mask": expected_request_update_mask,
+ "ignore_warnings": ignore_warnings,
}
+ }
+
+ result = app_profile.update(ignore_warnings=ignore_warnings)
+ actual_request = client._instance_admin_client.update_app_profile.call_args_list[
+ 0
+ ].kwargs
+ assert actual_request == expected_request
+ assert (
+ result.metadata.type_url
+ == "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata"
+ )
+
+
+def test_app_profile_update_w_wrong_routing_policy():
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = client.instance(INSTANCE_ID)
+ app_profile = _make_app_profile(APP_PROFILE_ID, instance, routing_policy_type=None)
+ with pytest.raises(ValueError):
+ app_profile.update()
+
+
+def test_app_profile_delete():
+ from google.protobuf import empty_pb2
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+
+ instance_api = mock.create_autospec(BigtableInstanceAdminClient)
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = client.instance(INSTANCE_ID)
+ app_profile = _make_app_profile(APP_PROFILE_ID, instance)
+
+ # Create response_pb
+ response_pb = empty_pb2.Empty()
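+    # The delete RPC responds with Empty, so delete() yields no return value.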
+
+ # Patch the stub used by the API method.
+ client._instance_admin_client = instance_api
+ instance_stub = client._instance_admin_client.transport
+    instance_stub.delete_app_profile.side_effect = [response_pb]
+
+ # Create expected_result.
+ expected_result = None # delete() has no return value.
+
+ # Perform the method and check the result.
+ result = app_profile.delete()
- result = app_profile.update(ignore_warnings=ignore_warnings)
- actual_request = client._instance_admin_client.update_app_profile.call_args_list[
- 0
- ].kwargs
- self.assertEqual(actual_request, expected_request)
- self.assertEqual(
- result.metadata.type_url,
- "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata",
- )
-
- def test_update_app_profile_with_wrong_routing_policy(self):
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = client.instance(self.INSTANCE_ID)
- app_profile = self._make_one(
- self.APP_PROFILE_ID, instance, routing_policy_type=None
- )
- with self.assertRaises(ValueError):
- app_profile.update()
-
- def test_delete(self):
- from google.protobuf import empty_pb2
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
-
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = client.instance(self.INSTANCE_ID)
- app_profile = self._make_one(self.APP_PROFILE_ID, instance)
-
- # Create response_pb
- response_pb = empty_pb2.Empty()
-
- # Patch the stub used by the API method.
- client._instance_admin_client = instance_api
- instance_stub = client._instance_admin_client.transport
- instance_stub.delete_cluster.side_effect = [response_pb]
-
- # Create expected_result.
- expected_result = None # delete() has no return value.
-
- # Perform the method and check the result.
- result = app_profile.delete()
-
- self.assertEqual(result, expected_result)
+ assert result == expected_result
class _Client(object):
diff --git a/tests/unit/test_backup.py b/tests/unit/test_backup.py
index a32e18adb..92e9d7307 100644
--- a/tests/unit/test_backup.py
+++ b/tests/unit/test_backup.py
@@ -14,881 +14,893 @@
import datetime
+
import mock
-import unittest
+import pytest
from ._testing import _make_credentials
from google.cloud._helpers import UTC
+PROJECT_ID = "project-id"
+INSTANCE_ID = "instance-id"
+INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
+CLUSTER_ID = "cluster-id"
+CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID
+TABLE_ID = "table-id"
+TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
+BACKUP_ID = "backup-id"
+BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID
+
+ALT_INSTANCE = "other-instance-id"
+ALT_INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + ALT_INSTANCE
+ALT_CLUSTER_NAME = ALT_INSTANCE_NAME + "/clusters/" + CLUSTER_ID
+ALT_BACKUP_NAME = ALT_CLUSTER_NAME + "/backups/" + BACKUP_ID
+
+
+def _make_timestamp():
+ return datetime.datetime.utcnow().replace(tzinfo=UTC)
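+    # A timezone-aware UTC "now", used for the expire/start/end time fixtures.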
+
+
+def _make_table_admin_client():
+ from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
+
+ return mock.create_autospec(BigtableTableAdminClient, instance=True)
+
+
+def _make_backup(*args, **kwargs):
+ from google.cloud.bigtable.backup import Backup
+
+ return Backup(*args, **kwargs)
+
+
+def test_backup_constructor_defaults():
+ instance = _Instance(INSTANCE_NAME)
+ backup = _make_backup(BACKUP_ID, instance)
+
+ assert backup.backup_id == BACKUP_ID
+ assert backup._instance is instance
+ assert backup._cluster is None
+ assert backup.table_id is None
+ assert backup._expire_time is None
+
+ assert backup._parent is None
+ assert backup._source_table is None
+ assert backup._start_time is None
+ assert backup._end_time is None
+ assert backup._size_bytes is None
+ assert backup._state is None
+ assert backup._encryption_info is None
+
+
+def test_backup_constructor_explicit():
+ instance = _Instance(INSTANCE_NAME)
+ expire_time = _make_timestamp()
+
+ backup = _make_backup(
+ BACKUP_ID,
+ instance,
+ cluster_id=CLUSTER_ID,
+ table_id=TABLE_ID,
+ expire_time=expire_time,
+ encryption_info="encryption_info",
+ )
+
+ assert backup.backup_id == BACKUP_ID
+ assert backup._instance is instance
+ assert backup._cluster is CLUSTER_ID
+ assert backup.table_id == TABLE_ID
+ assert backup._expire_time == expire_time
+ assert backup._encryption_info == "encryption_info"
+
+ assert backup._parent is None
+ assert backup._source_table is None
+ assert backup._start_time is None
+ assert backup._end_time is None
+ assert backup._size_bytes is None
+ assert backup._state is None
+
+
+def test_backup_from_pb_w_project_mismatch():
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.cloud.bigtable.backup import Backup
+
+ alt_project_id = "alt-project-id"
+ client = _Client(project=alt_project_id)
+ instance = _Instance(INSTANCE_NAME, client)
+ backup_pb = table.Backup(name=BACKUP_NAME)
+
+ with pytest.raises(ValueError):
+ Backup.from_pb(backup_pb, instance)
+
+
+def test_backup_from_pb_w_instance_mismatch():
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.cloud.bigtable.backup import Backup
+
+ alt_instance = "/projects/%s/instances/alt-instance" % PROJECT_ID
+ client = _Client()
+ instance = _Instance(alt_instance, client)
+ backup_pb = table.Backup(name=BACKUP_NAME)
+
+ with pytest.raises(ValueError):
+ Backup.from_pb(backup_pb, instance)
+
+
+def test_backup_from_pb_w_bad_name():
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.cloud.bigtable.backup import Backup
+
+ client = _Client()
+ instance = _Instance(INSTANCE_NAME, client)
+ backup_pb = table.Backup(name="invalid_name")
+
+ with pytest.raises(ValueError):
+ Backup.from_pb(backup_pb, instance)
+
+
+def test_backup_from_pb_success():
+ from google.cloud.bigtable.encryption_info import EncryptionInfo
+ from google.cloud.bigtable.error import Status
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.cloud.bigtable.backup import Backup
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.rpc.code_pb2 import Code
+
+ client = _Client()
+ instance = _Instance(INSTANCE_NAME, client)
+ timestamp = _datetime_to_pb_timestamp(_make_timestamp())
+ size_bytes = 1234
+ state = table.Backup.State.READY
+ GOOGLE_DEFAULT_ENCRYPTION = (
+ table.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
+ )
+ backup_pb = table.Backup(
+ name=BACKUP_NAME,
+ source_table=TABLE_NAME,
+ expire_time=timestamp,
+ start_time=timestamp,
+ end_time=timestamp,
+ size_bytes=size_bytes,
+ state=state,
+ encryption_info=table.EncryptionInfo(
+ encryption_type=GOOGLE_DEFAULT_ENCRYPTION,
+ encryption_status=_StatusPB(Code.OK, "Status OK"),
+ kms_key_version="2",
+ ),
+ )
+
+ backup = Backup.from_pb(backup_pb, instance)
+
+ assert isinstance(backup, Backup)
+ assert backup._instance == instance
+ assert backup.backup_id == BACKUP_ID
+ assert backup.cluster == CLUSTER_ID
+ assert backup.table_id == TABLE_ID
+ assert backup._expire_time == timestamp
+ assert backup.start_time == timestamp
+ assert backup.end_time == timestamp
+ assert backup._size_bytes == size_bytes
+ assert backup._state == state
+ expected_info = EncryptionInfo(
+ encryption_type=GOOGLE_DEFAULT_ENCRYPTION,
+ encryption_status=Status(_StatusPB(Code.OK, "Status OK")),
+ kms_key_version="2",
+ )
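+    # Rebuilding the expected EncryptionInfo from the same inputs checks the
+    # encryption type, wrapped status, and KMS key version in one equality.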
+ assert backup.encryption_info == expected_info
+
+
+def test_backup_name():
+ from google.cloud.bigtable.client import Client
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+
+ api = mock.create_autospec(BigtableInstanceAdminClient)
+ credentials = _make_credentials()
+ client = Client(project=PROJECT_ID, credentials=credentials, admin=True)
+ client._table_admin_client = api
+ instance = _Instance(INSTANCE_NAME, client)
+
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+ assert backup.name == BACKUP_NAME
+
+
+def test_backup_cluster():
+ backup = _make_backup(BACKUP_ID, _Instance(INSTANCE_NAME), cluster_id=CLUSTER_ID)
+ assert backup.cluster == CLUSTER_ID
+
+
+def test_backup_cluster_setter():
+ backup = _make_backup(BACKUP_ID, _Instance(INSTANCE_NAME))
+ backup.cluster = CLUSTER_ID
+ assert backup.cluster == CLUSTER_ID
+
+
+def test_backup_parent_none():
+ backup = _make_backup(BACKUP_ID, _Instance(INSTANCE_NAME),)
+ assert backup.parent is None
+
+
+def test_backup_parent_w_cluster():
+ from google.cloud.bigtable.client import Client
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+
+ api = mock.create_autospec(BigtableInstanceAdminClient)
+ credentials = _make_credentials()
+ client = Client(project=PROJECT_ID, credentials=credentials, admin=True)
+ client._table_admin_client = api
+ instance = _Instance(INSTANCE_NAME, client)
+
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+ assert backup._cluster == CLUSTER_ID
+ assert backup.parent == CLUSTER_NAME
+
+
+def test_backup_source_table_none():
+ from google.cloud.bigtable.client import Client
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+
+ api = mock.create_autospec(BigtableInstanceAdminClient)
+ credentials = _make_credentials()
+ client = Client(project=PROJECT_ID, credentials=credentials, admin=True)
+ client._table_admin_client = api
+ instance = _Instance(INSTANCE_NAME, client)
+
+ backup = _make_backup(BACKUP_ID, instance)
+ assert backup.source_table is None
+
+
+def test_backup_source_table_valid():
+ from google.cloud.bigtable.client import Client
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+
+ api = mock.create_autospec(BigtableInstanceAdminClient)
+ credentials = _make_credentials()
+ client = Client(project=PROJECT_ID, credentials=credentials, admin=True)
+ client._table_admin_client = api
+ instance = _Instance(INSTANCE_NAME, client)
+
+ backup = _make_backup(BACKUP_ID, instance, table_id=TABLE_ID)
+ assert backup.source_table == TABLE_NAME
+
+
+def test_backup_expire_time():
+ instance = _Instance(INSTANCE_NAME)
+ expire_time = _make_timestamp()
+ backup = _make_backup(BACKUP_ID, instance, expire_time=expire_time)
+ assert backup.expire_time == expire_time
+
+
+def test_backup_expire_time_setter():
+ instance = _Instance(INSTANCE_NAME)
+ expire_time = _make_timestamp()
+ backup = _make_backup(BACKUP_ID, instance)
+ backup.expire_time = expire_time
+ assert backup.expire_time == expire_time
+
+
+def test_backup_start_time():
+ instance = _Instance(INSTANCE_NAME)
+ backup = _make_backup(BACKUP_ID, instance)
+ expected = backup._start_time = _make_timestamp()
+ assert backup.start_time == expected
+
+
+def test_backup_end_time():
+ instance = _Instance(INSTANCE_NAME)
+ backup = _make_backup(BACKUP_ID, instance)
+ expected = backup._end_time = _make_timestamp()
+ assert backup.end_time == expected
+
+
+def test_backup_size():
+ instance = _Instance(INSTANCE_NAME)
+ backup = _make_backup(BACKUP_ID, instance)
+ expected = backup._size_bytes = 10
+ assert backup.size_bytes == expected
+
+
+def test_backup_state():
+ from google.cloud.bigtable_admin_v2.types import table
+
+ instance = _Instance(INSTANCE_NAME)
+ backup = _make_backup(BACKUP_ID, instance)
+ expected = backup._state = table.Backup.State.READY
+ assert backup.state == expected
+
+
+def test_backup___eq__():
+ instance = object()
+ backup1 = _make_backup(BACKUP_ID, instance)
+ backup2 = _make_backup(BACKUP_ID, instance)
+ assert backup1 == backup2
+
+
+def test_backup___eq___w_different_types():
+ instance = object()
+ backup1 = _make_backup(BACKUP_ID, instance)
+ backup2 = object()
+ assert not (backup1 == backup2)
+
+
+def test_backup___ne___w_same_value():
+ instance = object()
+ backup1 = _make_backup(BACKUP_ID, instance)
+ backup2 = _make_backup(BACKUP_ID, instance)
+ assert not (backup1 != backup2)
+
+
+def test_backup___ne__():
+ backup1 = _make_backup("backup_1", "instance1")
+ backup2 = _make_backup("backup_2", "instance2")
+ assert backup1 != backup2
+
+
+def test_backup_create_w_grpc_error():
+ from google.api_core.exceptions import GoogleAPICallError
+ from google.api_core.exceptions import Unknown
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable_admin_v2.types import table
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.create_backup.side_effect = Unknown("testing")
+
+ timestamp = _make_timestamp()
+ backup = _make_backup(
+ BACKUP_ID,
+ _Instance(INSTANCE_NAME, client=client),
+ table_id=TABLE_ID,
+ expire_time=timestamp,
+ )
+
+ backup_pb = table.Backup(
+ source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp),
+ )
+
+ with pytest.raises(GoogleAPICallError):
+ backup.create(CLUSTER_ID)
+
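+    # Even though the RPC failed, exactly one request should have gone out
+    # with the fully-built parent/backup_id/backup payload.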
+ api.create_backup.assert_called_once_with(
+ request={"parent": CLUSTER_NAME, "backup_id": BACKUP_ID, "backup": backup_pb}
+ )
+
+
+def test_backup_create_w_already_exists():
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.cloud.exceptions import Conflict
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.create_backup.side_effect = Conflict("testing")
+
+ timestamp = _make_timestamp()
+ backup = _make_backup(
+ BACKUP_ID,
+ _Instance(INSTANCE_NAME, client=client),
+ table_id=TABLE_ID,
+ expire_time=timestamp,
+ )
+
+ backup_pb = table.Backup(
+ source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp),
+ )
+
+ with pytest.raises(Conflict):
+ backup.create(CLUSTER_ID)
+
+ api.create_backup.assert_called_once_with(
+ request={"parent": CLUSTER_NAME, "backup_id": BACKUP_ID, "backup": backup_pb}
+ )
+
+
+def test_backup_create_w_instance_not_found():
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.cloud.exceptions import NotFound
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.create_backup.side_effect = NotFound("testing")
+
+ timestamp = _make_timestamp()
+ backup = _make_backup(
+ BACKUP_ID,
+ _Instance(INSTANCE_NAME, client=client),
+ table_id=TABLE_ID,
+ expire_time=timestamp,
+ )
+
+ backup_pb = table.Backup(
+ source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp),
+ )
+
+ with pytest.raises(NotFound):
+ backup.create(CLUSTER_ID)
+
+ api.create_backup.assert_called_once_with(
+ request={"parent": CLUSTER_NAME, "backup_id": BACKUP_ID, "backup": backup_pb}
+ )
+
+
+def test_backup_create_w_cluster_not_set():
+ backup = _make_backup(
+ BACKUP_ID,
+ _Instance(INSTANCE_NAME),
+ table_id=TABLE_ID,
+ expire_time=_make_timestamp(),
+ )
+
+ with pytest.raises(ValueError):
+ backup.create()
+
+
+def test_backup_create_w_table_not_set():
+ backup = _make_backup(
+ BACKUP_ID, _Instance(INSTANCE_NAME), expire_time=_make_timestamp(),
+ )
+
+ with pytest.raises(ValueError):
+ backup.create(CLUSTER_ID)
+
+
+def test_backup_create_w_expire_time_not_set():
+ backup = _make_backup(BACKUP_ID, _Instance(INSTANCE_NAME), table_id=TABLE_ID,)
+
+ with pytest.raises(ValueError):
+ backup.create(CLUSTER_ID)
+
+
+def test_backup_create_success():
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.cloud.bigtable import Client
+
+ op_future = object()
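+    # A plain object() sentinel stands in for the long-running operation
+    # future returned by the API.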
+ credentials = _make_credentials()
+ client = Client(project=PROJECT_ID, credentials=credentials, admin=True)
+ api = client._table_admin_client = _make_table_admin_client()
+ api.create_backup.return_value = op_future
+
+ timestamp = _make_timestamp()
+ backup = _make_backup(
+ BACKUP_ID,
+ _Instance(INSTANCE_NAME, client=client),
+ table_id=TABLE_ID,
+ expire_time=timestamp,
+ )
-class TestBackup(unittest.TestCase):
- PROJECT_ID = "project-id"
- INSTANCE_ID = "instance-id"
- INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
- CLUSTER_ID = "cluster-id"
- CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID
- TABLE_ID = "table-id"
- TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
- BACKUP_ID = "backup-id"
- BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID
-
- ALT_INSTANCE = "other-instance-id"
- ALT_INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + ALT_INSTANCE
- ALT_CLUSTER_NAME = ALT_INSTANCE_NAME + "/clusters/" + CLUSTER_ID
- ALT_BACKUP_NAME = ALT_CLUSTER_NAME + "/backups/" + BACKUP_ID
-
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.backup import Backup
-
- return Backup
-
- @staticmethod
- def _make_table_admin_client():
- from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
-
- return mock.create_autospec(BigtableTableAdminClient, instance=True)
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def _make_timestamp(self):
- return datetime.datetime.utcnow().replace(tzinfo=UTC)
-
- def test_constructor_defaults(self):
- instance = _Instance(self.INSTANCE_NAME)
- backup = self._make_one(self.BACKUP_ID, instance)
-
- self.assertEqual(backup.backup_id, self.BACKUP_ID)
- self.assertIs(backup._instance, instance)
- self.assertIsNone(backup._cluster)
- self.assertIsNone(backup.table_id)
- self.assertIsNone(backup._expire_time)
-
- self.assertIsNone(backup._parent)
- self.assertIsNone(backup._source_table)
- self.assertIsNone(backup._start_time)
- self.assertIsNone(backup._end_time)
- self.assertIsNone(backup._size_bytes)
- self.assertIsNone(backup._state)
- self.assertIsNone(backup._encryption_info)
-
- def test_constructor_non_defaults(self):
- instance = _Instance(self.INSTANCE_NAME)
- expire_time = self._make_timestamp()
-
- backup = self._make_one(
- self.BACKUP_ID,
- instance,
- cluster_id=self.CLUSTER_ID,
- table_id=self.TABLE_ID,
- expire_time=expire_time,
- encryption_info="encryption_info",
- )
-
- self.assertEqual(backup.backup_id, self.BACKUP_ID)
- self.assertIs(backup._instance, instance)
- self.assertIs(backup._cluster, self.CLUSTER_ID)
- self.assertEqual(backup.table_id, self.TABLE_ID)
- self.assertEqual(backup._expire_time, expire_time)
- self.assertEqual(backup._encryption_info, "encryption_info")
-
- self.assertIsNone(backup._parent)
- self.assertIsNone(backup._source_table)
- self.assertIsNone(backup._start_time)
- self.assertIsNone(backup._end_time)
- self.assertIsNone(backup._size_bytes)
- self.assertIsNone(backup._state)
-
- def test_from_pb_project_mismatch(self):
- from google.cloud.bigtable_admin_v2.types import table
-
- alt_project_id = "alt-project-id"
- client = _Client(project=alt_project_id)
- instance = _Instance(self.INSTANCE_NAME, client)
- backup_pb = table.Backup(name=self.BACKUP_NAME)
- klasse = self._get_target_class()
-
- with self.assertRaises(ValueError):
- klasse.from_pb(backup_pb, instance)
-
- def test_from_pb_instance_mismatch(self):
- from google.cloud.bigtable_admin_v2.types import table
-
- alt_instance = "/projects/%s/instances/alt-instance" % self.PROJECT_ID
- client = _Client()
- instance = _Instance(alt_instance, client)
- backup_pb = table.Backup(name=self.BACKUP_NAME)
- klasse = self._get_target_class()
-
- with self.assertRaises(ValueError):
- klasse.from_pb(backup_pb, instance)
-
- def test_from_pb_bad_name(self):
- from google.cloud.bigtable_admin_v2.types import table
-
- client = _Client()
- instance = _Instance(self.INSTANCE_NAME, client)
- backup_pb = table.Backup(name="invalid_name")
- klasse = self._get_target_class()
-
- with self.assertRaises(ValueError):
- klasse.from_pb(backup_pb, instance)
-
- def test_from_pb_success(self):
- from google.cloud.bigtable.encryption_info import EncryptionInfo
- from google.cloud.bigtable.error import Status
- from google.cloud.bigtable_admin_v2.types import table
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.rpc.code_pb2 import Code
-
- client = _Client()
- instance = _Instance(self.INSTANCE_NAME, client)
- timestamp = _datetime_to_pb_timestamp(self._make_timestamp())
- size_bytes = 1234
- state = table.Backup.State.READY
- GOOGLE_DEFAULT_ENCRYPTION = (
- table.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
- )
- backup_pb = table.Backup(
- name=self.BACKUP_NAME,
- source_table=self.TABLE_NAME,
- expire_time=timestamp,
- start_time=timestamp,
- end_time=timestamp,
- size_bytes=size_bytes,
- state=state,
- encryption_info=table.EncryptionInfo(
- encryption_type=GOOGLE_DEFAULT_ENCRYPTION,
- encryption_status=_StatusPB(Code.OK, "Status OK"),
- kms_key_version="2",
- ),
- )
- klasse = self._get_target_class()
-
- backup = klasse.from_pb(backup_pb, instance)
-
- self.assertTrue(isinstance(backup, klasse))
- self.assertEqual(backup._instance, instance)
- self.assertEqual(backup.backup_id, self.BACKUP_ID)
- self.assertEqual(backup.cluster, self.CLUSTER_ID)
- self.assertEqual(backup.table_id, self.TABLE_ID)
- self.assertEqual(backup._expire_time, timestamp)
- self.assertEqual(backup.start_time, timestamp)
- self.assertEqual(backup.end_time, timestamp)
- self.assertEqual(backup._size_bytes, size_bytes)
- self.assertEqual(backup._state, state)
- self.assertEqual(
- backup.encryption_info,
- EncryptionInfo(
- encryption_type=GOOGLE_DEFAULT_ENCRYPTION,
- encryption_status=Status(_StatusPB(Code.OK, "Status OK")),
- kms_key_version="2",
- ),
- )
-
- def test_property_name(self):
- from google.cloud.bigtable.client import Client
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
-
- api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
- client._table_admin_client = api
- instance = _Instance(self.INSTANCE_NAME, client)
-
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
- self.assertEqual(backup.name, self.BACKUP_NAME)
-
- def test_property_cluster(self):
- backup = self._make_one(
- self.BACKUP_ID, _Instance(self.INSTANCE_NAME), cluster_id=self.CLUSTER_ID
- )
- self.assertEqual(backup.cluster, self.CLUSTER_ID)
-
- def test_property_cluster_setter(self):
- backup = self._make_one(self.BACKUP_ID, _Instance(self.INSTANCE_NAME))
- backup.cluster = self.CLUSTER_ID
- self.assertEqual(backup.cluster, self.CLUSTER_ID)
-
- def test_property_parent_none(self):
- backup = self._make_one(self.BACKUP_ID, _Instance(self.INSTANCE_NAME),)
- self.assertIsNone(backup.parent)
-
- def test_property_parent_w_cluster(self):
- from google.cloud.bigtable.client import Client
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
-
- api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
- client._table_admin_client = api
- instance = _Instance(self.INSTANCE_NAME, client)
-
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
- self.assertEqual(backup._cluster, self.CLUSTER_ID)
- self.assertEqual(backup.parent, self.CLUSTER_NAME)
-
- def test_property_source_table_none(self):
- from google.cloud.bigtable.client import Client
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
-
- api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
- client._table_admin_client = api
- instance = _Instance(self.INSTANCE_NAME, client)
-
- backup = self._make_one(self.BACKUP_ID, instance)
- self.assertIsNone(backup.source_table)
-
- def test_property_source_table_valid(self):
- from google.cloud.bigtable.client import Client
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
-
- api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
- client._table_admin_client = api
- instance = _Instance(self.INSTANCE_NAME, client)
-
- backup = self._make_one(self.BACKUP_ID, instance, table_id=self.TABLE_ID)
- self.assertEqual(backup.source_table, self.TABLE_NAME)
-
- def test_property_expire_time(self):
- instance = _Instance(self.INSTANCE_NAME)
- expire_time = self._make_timestamp()
- backup = self._make_one(self.BACKUP_ID, instance, expire_time=expire_time)
- self.assertEqual(backup.expire_time, expire_time)
-
- def test_property_expire_time_setter(self):
- instance = _Instance(self.INSTANCE_NAME)
- expire_time = self._make_timestamp()
- backup = self._make_one(self.BACKUP_ID, instance)
- backup.expire_time = expire_time
- self.assertEqual(backup.expire_time, expire_time)
-
- def test_property_start_time(self):
- instance = _Instance(self.INSTANCE_NAME)
- backup = self._make_one(self.BACKUP_ID, instance)
- expected = backup._start_time = self._make_timestamp()
- self.assertEqual(backup.start_time, expected)
-
- def test_property_end_time(self):
- instance = _Instance(self.INSTANCE_NAME)
- backup = self._make_one(self.BACKUP_ID, instance)
- expected = backup._end_time = self._make_timestamp()
- self.assertEqual(backup.end_time, expected)
-
- def test_property_size(self):
- instance = _Instance(self.INSTANCE_NAME)
- backup = self._make_one(self.BACKUP_ID, instance)
- expected = backup._size_bytes = 10
- self.assertEqual(backup.size_bytes, expected)
-
- def test_property_state(self):
- from google.cloud.bigtable_admin_v2.types import table
-
- instance = _Instance(self.INSTANCE_NAME)
- backup = self._make_one(self.BACKUP_ID, instance)
- expected = backup._state = table.Backup.State.READY
- self.assertEqual(backup.state, expected)
-
- def test___eq__(self):
- instance = object()
- backup1 = self._make_one(self.BACKUP_ID, instance)
- backup2 = self._make_one(self.BACKUP_ID, instance)
- self.assertTrue(backup1 == backup2)
-
- def test___eq__different_types(self):
- instance = object()
- backup1 = self._make_one(self.BACKUP_ID, instance)
- backup2 = object()
- self.assertFalse(backup1 == backup2)
-
- def test___ne__same_value(self):
- instance = object()
- backup1 = self._make_one(self.BACKUP_ID, instance)
- backup2 = self._make_one(self.BACKUP_ID, instance)
- self.assertFalse(backup1 != backup2)
-
- def test___ne__(self):
- backup1 = self._make_one("backup_1", "instance1")
- backup2 = self._make_one("backup_2", "instance2")
- self.assertTrue(backup1 != backup2)
-
- def test_create_grpc_error(self):
- from google.api_core.exceptions import GoogleAPICallError
- from google.api_core.exceptions import Unknown
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable_admin_v2.types import table
-
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.create_backup.side_effect = Unknown("testing")
-
- timestamp = self._make_timestamp()
- backup = self._make_one(
- self.BACKUP_ID,
- _Instance(self.INSTANCE_NAME, client=client),
- table_id=self.TABLE_ID,
- expire_time=timestamp,
- )
-
- backup_pb = table.Backup(
- source_table=self.TABLE_NAME,
- expire_time=_datetime_to_pb_timestamp(timestamp),
- )
-
- with self.assertRaises(GoogleAPICallError):
- backup.create(self.CLUSTER_ID)
-
- api.create_backup.assert_called_once_with(
- request={
- "parent": self.CLUSTER_NAME,
- "backup_id": self.BACKUP_ID,
- "backup": backup_pb,
- }
- )
-
- def test_create_already_exists(self):
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable_admin_v2.types import table
- from google.cloud.exceptions import Conflict
-
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.create_backup.side_effect = Conflict("testing")
-
- timestamp = self._make_timestamp()
- backup = self._make_one(
- self.BACKUP_ID,
- _Instance(self.INSTANCE_NAME, client=client),
- table_id=self.TABLE_ID,
- expire_time=timestamp,
- )
-
- backup_pb = table.Backup(
- source_table=self.TABLE_NAME,
- expire_time=_datetime_to_pb_timestamp(timestamp),
- )
-
- with self.assertRaises(Conflict):
- backup.create(self.CLUSTER_ID)
-
- api.create_backup.assert_called_once_with(
- request={
- "parent": self.CLUSTER_NAME,
- "backup_id": self.BACKUP_ID,
- "backup": backup_pb,
- }
- )
-
- def test_create_instance_not_found(self):
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable_admin_v2.types import table
- from google.cloud.exceptions import NotFound
-
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.create_backup.side_effect = NotFound("testing")
-
- timestamp = self._make_timestamp()
- backup = self._make_one(
- self.BACKUP_ID,
- _Instance(self.INSTANCE_NAME, client=client),
- table_id=self.TABLE_ID,
- expire_time=timestamp,
- )
-
- backup_pb = table.Backup(
- source_table=self.TABLE_NAME,
- expire_time=_datetime_to_pb_timestamp(timestamp),
- )
-
- with self.assertRaises(NotFound):
- backup.create(self.CLUSTER_ID)
-
- api.create_backup.assert_called_once_with(
- request={
- "parent": self.CLUSTER_NAME,
- "backup_id": self.BACKUP_ID,
- "backup": backup_pb,
- }
- )
-
- def test_create_cluster_not_set(self):
- backup = self._make_one(
- self.BACKUP_ID,
- _Instance(self.INSTANCE_NAME),
- table_id=self.TABLE_ID,
- expire_time=self._make_timestamp(),
- )
-
- with self.assertRaises(ValueError):
- backup.create()
-
- def test_create_table_not_set(self):
- backup = self._make_one(
- self.BACKUP_ID,
- _Instance(self.INSTANCE_NAME),
- expire_time=self._make_timestamp(),
- )
-
- with self.assertRaises(ValueError):
- backup.create(self.CLUSTER_ID)
-
- def test_create_expire_time_not_set(self):
- backup = self._make_one(
- self.BACKUP_ID, _Instance(self.INSTANCE_NAME), table_id=self.TABLE_ID,
- )
-
- with self.assertRaises(ValueError):
- backup.create(self.CLUSTER_ID)
-
- def test_create_success(self):
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable_admin_v2.types import table
- from google.cloud.bigtable import Client
-
- op_future = object()
- credentials = _make_credentials()
- client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
- api = client._table_admin_client = self._make_table_admin_client()
- api.create_backup.return_value = op_future
-
- timestamp = self._make_timestamp()
- backup = self._make_one(
- self.BACKUP_ID,
- _Instance(self.INSTANCE_NAME, client=client),
- table_id=self.TABLE_ID,
- expire_time=timestamp,
- )
-
- backup_pb = table.Backup(
- source_table=self.TABLE_NAME,
- expire_time=_datetime_to_pb_timestamp(timestamp),
- )
-
- future = backup.create(self.CLUSTER_ID)
- self.assertEqual(backup._cluster, self.CLUSTER_ID)
- self.assertIs(future, op_future)
-
- api.create_backup.assert_called_once_with(
- request={
- "parent": self.CLUSTER_NAME,
- "backup_id": self.BACKUP_ID,
- "backup": backup_pb,
- }
- )
-
- def test_exists_grpc_error(self):
- from google.api_core.exceptions import Unknown
-
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.get_backup.side_effect = Unknown("testing")
-
- instance = _Instance(self.INSTANCE_NAME, client=client)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
-
- with self.assertRaises(Unknown):
- backup.exists()
-
- request = {"name": self.BACKUP_NAME}
- api.get_backup.assert_called_once_with(request)
-
- def test_exists_not_found(self):
- from google.api_core.exceptions import NotFound
-
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.get_backup.side_effect = NotFound("testing")
-
- instance = _Instance(self.INSTANCE_NAME, client=client)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
-
- self.assertFalse(backup.exists())
-
- api.get_backup.assert_called_once_with(request={"name": self.BACKUP_NAME})
-
- def test_get(self):
- from google.cloud.bigtable_admin_v2.types import table
- from google.cloud._helpers import _datetime_to_pb_timestamp
-
- timestamp = _datetime_to_pb_timestamp(self._make_timestamp())
- state = table.Backup.State.READY
-
- client = _Client()
- backup_pb = table.Backup(
- name=self.BACKUP_NAME,
- source_table=self.TABLE_NAME,
- expire_time=timestamp,
- start_time=timestamp,
- end_time=timestamp,
- size_bytes=0,
- state=state,
- )
- api = client.table_admin_client = self._make_table_admin_client()
- api.get_backup.return_value = backup_pb
-
- instance = _Instance(self.INSTANCE_NAME, client=client)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
-
- self.assertEqual(backup.get(), backup_pb)
-
- def test_reload(self):
- from google.cloud.bigtable_admin_v2.types import table
- from google.cloud._helpers import _datetime_to_pb_timestamp
-
- timestamp = _datetime_to_pb_timestamp(self._make_timestamp())
- state = table.Backup.State.READY
-
- client = _Client()
- backup_pb = table.Backup(
- name=self.BACKUP_NAME,
- source_table=self.TABLE_NAME,
- expire_time=timestamp,
- start_time=timestamp,
- end_time=timestamp,
- size_bytes=0,
- state=state,
- )
- api = client.table_admin_client = self._make_table_admin_client()
- api.get_backup.return_value = backup_pb
-
- instance = _Instance(self.INSTANCE_NAME, client=client)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
-
- backup.reload()
- self.assertEqual(backup._source_table, self.TABLE_NAME)
- self.assertEqual(backup._expire_time, timestamp)
- self.assertEqual(backup._start_time, timestamp)
- self.assertEqual(backup._end_time, timestamp)
- self.assertEqual(backup._size_bytes, 0)
- self.assertEqual(backup._state, state)
+ backup_pb = table.Backup(
+ source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp),
+ )
- def test_exists_success(self):
- from google.cloud.bigtable_admin_v2.types import table
+ future = backup.create(CLUSTER_ID)
+ assert backup._cluster == CLUSTER_ID
+ assert future is op_future
- client = _Client()
- backup_pb = table.Backup(name=self.BACKUP_NAME)
- api = client.table_admin_client = self._make_table_admin_client()
- api.get_backup.return_value = backup_pb
+ api.create_backup.assert_called_once_with(
+ request={"parent": CLUSTER_NAME, "backup_id": BACKUP_ID, "backup": backup_pb}
+ )
- instance = _Instance(self.INSTANCE_NAME, client=client)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
- self.assertTrue(backup.exists())
+def test_backup_get():
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.cloud._helpers import _datetime_to_pb_timestamp
- api.get_backup.assert_called_once_with(request={"name": self.BACKUP_NAME})
+ timestamp = _datetime_to_pb_timestamp(_make_timestamp())
+ state = table.Backup.State.READY
- def test_delete_grpc_error(self):
- from google.api_core.exceptions import Unknown
+ client = _Client()
+ backup_pb = table.Backup(
+ name=BACKUP_NAME,
+ source_table=TABLE_NAME,
+ expire_time=timestamp,
+ start_time=timestamp,
+ end_time=timestamp,
+ size_bytes=0,
+ state=state,
+ )
+ api = client.table_admin_client = _make_table_admin_client()
+ api.get_backup.return_value = backup_pb
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.delete_backup.side_effect = Unknown("testing")
- instance = _Instance(self.INSTANCE_NAME, client=client)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
+ instance = _Instance(INSTANCE_NAME, client=client)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
- with self.assertRaises(Unknown):
- backup.delete()
+ assert backup.get() == backup_pb
- api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME})
- def test_delete_not_found(self):
- from google.api_core.exceptions import NotFound
+def test_backup_reload():
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.cloud._helpers import _datetime_to_pb_timestamp
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.delete_backup.side_effect = NotFound("testing")
- instance = _Instance(self.INSTANCE_NAME, client=client)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
+ timestamp = _datetime_to_pb_timestamp(_make_timestamp())
+ state = table.Backup.State.READY
- with self.assertRaises(NotFound):
- backup.delete()
-
- api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME})
-
- def test_delete_success(self):
- from google.protobuf.empty_pb2 import Empty
-
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.delete_backup.return_value = Empty()
- instance = _Instance(self.INSTANCE_NAME, client=client)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
+ client = _Client()
+ backup_pb = table.Backup(
+ name=BACKUP_NAME,
+ source_table=TABLE_NAME,
+ expire_time=timestamp,
+ start_time=timestamp,
+ end_time=timestamp,
+ size_bytes=0,
+ state=state,
+ )
+ api = client.table_admin_client = _make_table_admin_client()
+ api.get_backup.return_value = backup_pb
+ instance = _Instance(INSTANCE_NAME, client=client)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+
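+ # reload() re-fetches the backup and caches each field from the returned proto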
+ backup.reload()
+ assert backup._source_table == TABLE_NAME
+ assert backup._expire_time == timestamp
+ assert backup._start_time == timestamp
+ assert backup._end_time == timestamp
+ assert backup._size_bytes == 0
+ assert backup._state == state
+
+
+def test_backup_exists_w_grpc_error():
+ from google.api_core.exceptions import Unknown
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.get_backup.side_effect = Unknown("testing")
+
+ instance = _Instance(INSTANCE_NAME, client=client)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+
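+ # gRPC errors other than NotFound should propagate out of exists()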
+ with pytest.raises(Unknown):
+ backup.exists()
+
+ api.get_backup.assert_called_once_with(request={"name": BACKUP_NAME})
+
+
+def test_backup_exists_w_not_found():
+ from google.api_core.exceptions import NotFound
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.get_backup.side_effect = NotFound("testing")
+
+ instance = _Instance(INSTANCE_NAME, client=client)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+
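+ # NotFound means the backup is absent: exists() returns False rather than raising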
+ assert not backup.exists()
+
+ api.get_backup.assert_called_once_with(request={"name": BACKUP_NAME})
+
+
+def test_backup_exists_success():
+ from google.cloud.bigtable_admin_v2.types import table
+
+ client = _Client()
+ backup_pb = table.Backup(name=BACKUP_NAME)
+ api = client.table_admin_client = _make_table_admin_client()
+ api.get_backup.return_value = backup_pb
+
+ instance = _Instance(INSTANCE_NAME, client=client)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+
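+ # any successful get_backup response means the backup exists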
+ assert backup.exists()
+
+ api.get_backup.assert_called_once_with(request={"name": BACKUP_NAME})
+
+
+def test_backup_delete_w_grpc_error():
+ from google.api_core.exceptions import Unknown
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.delete_backup.side_effect = Unknown("testing")
+ instance = _Instance(INSTANCE_NAME, client=client)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+
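+ # unexpected gRPC errors from delete_backup surface to the caller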
+ with pytest.raises(Unknown):
backup.delete()
- api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME})
-
- def test_update_expire_time_grpc_error(self):
- from google.api_core.exceptions import Unknown
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable_admin_v2.types import table
- from google.protobuf import field_mask_pb2
-
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.update_backup.side_effect = Unknown("testing")
- instance = _Instance(self.INSTANCE_NAME, client=client)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
- expire_time = self._make_timestamp()
-
- with self.assertRaises(Unknown):
- backup.update_expire_time(expire_time)
-
- backup_update = table.Backup(
- name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time),
- )
- update_mask = field_mask_pb2.FieldMask(paths=["expire_time"])
- api.update_backup.assert_called_once_with(
- request={"backup": backup_update, "update_mask": update_mask}
- )
-
- def test_update_expire_time_not_found(self):
- from google.api_core.exceptions import NotFound
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable_admin_v2.types import table
- from google.protobuf import field_mask_pb2
-
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.update_backup.side_effect = NotFound("testing")
- instance = _Instance(self.INSTANCE_NAME, client=client)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
- expire_time = self._make_timestamp()
-
- with self.assertRaises(NotFound):
- backup.update_expire_time(expire_time)
-
- backup_update = table.Backup(
- name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time),
- )
- update_mask = field_mask_pb2.FieldMask(paths=["expire_time"])
- api.update_backup.assert_called_once_with(
- request={"backup": backup_update, "update_mask": update_mask}
- )
-
- def test_update_expire_time_success(self):
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable_admin_v2.types import table
- from google.protobuf import field_mask_pb2
-
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.update_backup.return_type = table.Backup(name=self.BACKUP_NAME)
- instance = _Instance(self.INSTANCE_NAME, client=client)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
- expire_time = self._make_timestamp()
+ api.delete_backup.assert_called_once_with(request={"name": BACKUP_NAME})
+
+
+def test_backup_delete_w_not_found():
+ from google.api_core.exceptions import NotFound
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.delete_backup.side_effect = NotFound("testing")
+ instance = _Instance(INSTANCE_NAME, client=client)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+
+ with pytest.raises(NotFound):
+ backup.delete()
+
+ api.delete_backup.assert_called_once_with(request={"name": BACKUP_NAME})
+
+
+def test_backup_delete_success():
+ from google.protobuf.empty_pb2 import Empty
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.delete_backup.return_value = Empty()
+ instance = _Instance(INSTANCE_NAME, client=client)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+
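+ # delete() returns nothing; the assertion below verifies the RPC that was sent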
+ backup.delete()
+
+ api.delete_backup.assert_called_once_with(request={"name": BACKUP_NAME})
+
+
+def test_backup_update_expire_time_w_grpc_error():
+ from google.api_core.exceptions import Unknown
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.protobuf import field_mask_pb2
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.update_backup.side_effect = Unknown("testing")
+ instance = _Instance(INSTANCE_NAME, client=client)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+ expire_time = _make_timestamp()
+
+ with pytest.raises(Unknown):
+ backup.update_expire_time(expire_time)
+
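+ # the update request must carry a field mask naming only expire_time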
+ backup_update = table.Backup(
+ name=BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time),
+ )
+ update_mask = field_mask_pb2.FieldMask(paths=["expire_time"])
+ api.update_backup.assert_called_once_with(
+ request={"backup": backup_update, "update_mask": update_mask}
+ )
+
+
+def test_backup_update_expire_time_w_not_found():
+ from google.api_core.exceptions import NotFound
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.protobuf import field_mask_pb2
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.update_backup.side_effect = NotFound("testing")
+ instance = _Instance(INSTANCE_NAME, client=client)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+ expire_time = _make_timestamp()
+
+ with pytest.raises(NotFound):
backup.update_expire_time(expire_time)
- backup_update = table.Backup(
- name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time),
- )
- update_mask = field_mask_pb2.FieldMask(paths=["expire_time"])
- api.update_backup.assert_called_once_with(
- request={"backup": backup_update, "update_mask": update_mask}
- )
-
- def test_restore_grpc_error(self):
- from google.api_core.exceptions import GoogleAPICallError
- from google.api_core.exceptions import Unknown
-
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.restore_table.side_effect = Unknown("testing")
-
- timestamp = self._make_timestamp()
- backup = self._make_one(
- self.BACKUP_ID,
- _Instance(self.INSTANCE_NAME, client=client),
- cluster_id=self.CLUSTER_ID,
- table_id=self.TABLE_NAME,
- expire_time=timestamp,
- )
-
- with self.assertRaises(GoogleAPICallError):
- backup.restore(self.TABLE_ID)
-
- api.restore_table.assert_called_once_with(
- request={
- "parent": self.INSTANCE_NAME,
- "table_id": self.TABLE_ID,
- "backup": self.BACKUP_NAME,
- }
- )
-
- def test_restore_cluster_not_set(self):
- client = _Client()
- client.table_admin_client = self._make_table_admin_client()
- backup = self._make_one(
- self.BACKUP_ID,
- _Instance(self.INSTANCE_NAME, client=client),
- table_id=self.TABLE_ID,
- expire_time=self._make_timestamp(),
- )
-
- with self.assertRaises(ValueError):
- backup.restore(self.TABLE_ID)
-
- def _restore_helper(self, instance_id=None, instance_name=None):
- op_future = object()
- client = _Client()
- api = client.table_admin_client = self._make_table_admin_client()
- api.restore_table.return_value = op_future
-
- timestamp = self._make_timestamp()
- backup = self._make_one(
- self.BACKUP_ID,
- _Instance(self.INSTANCE_NAME, client=client),
- cluster_id=self.CLUSTER_ID,
- table_id=self.TABLE_NAME,
- expire_time=timestamp,
- )
-
- future = backup.restore(self.TABLE_ID, instance_id)
- self.assertEqual(backup._cluster, self.CLUSTER_ID)
- self.assertIs(future, op_future)
-
- api.restore_table.assert_called_once_with(
- request={
- "parent": instance_name or self.INSTANCE_NAME,
- "table_id": self.TABLE_ID,
- "backup": self.BACKUP_NAME,
- }
- )
- api.restore_table.reset_mock()
-
- def test_restore_default(self):
- self._restore_helper()
-
- def test_restore_to_another_instance(self):
- self._restore_helper(self.ALT_INSTANCE, self.ALT_INSTANCE_NAME)
-
- def test_get_iam_policy(self):
- from google.cloud.bigtable.client import Client
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
- )
- from google.iam.v1 import policy_pb2
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
-
- credentials = _make_credentials()
- client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
-
- instance = client.instance(instance_id=self.INSTANCE_ID)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
-
- version = 1
- etag = b"etag_v1"
- members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
- bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
- iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
-
- table_api = mock.create_autospec(BigtableTableAdminClient)
- client._table_admin_client = table_api
- table_api.get_iam_policy.return_value = iam_policy
-
- result = backup.get_iam_policy()
-
- table_api.get_iam_policy.assert_called_once_with(
- request={"resource": backup.name}
- )
- self.assertEqual(result.version, version)
- self.assertEqual(result.etag, etag)
-
- admins = result.bigtable_admins
- self.assertEqual(len(admins), len(members))
- for found, expected in zip(sorted(admins), sorted(members)):
- self.assertEqual(found, expected)
-
- def test_set_iam_policy(self):
- from google.cloud.bigtable.client import Client
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
- )
- from google.iam.v1 import policy_pb2
- from google.cloud.bigtable.policy import Policy
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
-
- credentials = _make_credentials()
- client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
-
- instance = client.instance(instance_id=self.INSTANCE_ID)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
-
- version = 1
- etag = b"etag_v1"
- members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
- bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
- iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
-
- table_api = mock.create_autospec(BigtableTableAdminClient)
- client._table_admin_client = table_api
- table_api.set_iam_policy.return_value = iam_policy_pb
-
- iam_policy = Policy(etag=etag, version=version)
- iam_policy[BIGTABLE_ADMIN_ROLE] = [
- Policy.user("user1@test.com"),
- Policy.service_account("service_acc1@test.com"),
- ]
-
- result = backup.set_iam_policy(iam_policy)
-
- table_api.set_iam_policy.assert_called_once_with(
- request={"resource": backup.name, "policy": iam_policy_pb}
- )
- self.assertEqual(result.version, version)
- self.assertEqual(result.etag, etag)
-
- admins = result.bigtable_admins
- self.assertEqual(len(admins), len(members))
- for found, expected in zip(sorted(admins), sorted(members)):
- self.assertEqual(found, expected)
-
- def test_test_iam_permissions(self):
- from google.cloud.bigtable.client import Client
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
- )
- from google.iam.v1 import iam_policy_pb2
-
- credentials = _make_credentials()
- client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
-
- instance = client.instance(instance_id=self.INSTANCE_ID)
- backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID)
+ backup_update = table.Backup(
+ name=BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time),
+ )
+ update_mask = field_mask_pb2.FieldMask(paths=["expire_time"])
+ api.update_backup.assert_called_once_with(
+ request={"backup": backup_update, "update_mask": update_mask}
+ )
+
+
+def test_backup_update_expire_time_success():
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable_admin_v2.types import table
+ from google.protobuf import field_mask_pb2
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.update_backup.return_value = table.Backup(name=BACKUP_NAME)
+ instance = _Instance(INSTANCE_NAME, client=client)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+ expire_time = _make_timestamp()
+
+ backup.update_expire_time(expire_time)
+
+ backup_update = table.Backup(
+ name=BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time),
+ )
+ update_mask = field_mask_pb2.FieldMask(paths=["expire_time"])
+ api.update_backup.assert_called_once_with(
+ request={"backup": backup_update, "update_mask": update_mask}
+ )
+
+
+def test_backup_restore_w_grpc_error():
+ from google.api_core.exceptions import GoogleAPICallError
+ from google.api_core.exceptions import Unknown
+
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.restore_table.side_effect = Unknown("testing")
+
+ timestamp = _make_timestamp()
+ backup = _make_backup(
+ BACKUP_ID,
+ _Instance(INSTANCE_NAME, client=client),
+ cluster_id=CLUSTER_ID,
+ table_id=TABLE_NAME,
+ expire_time=timestamp,
+ )
+
+ with pytest.raises(GoogleAPICallError):
+ backup.restore(TABLE_ID)
+
+ api.restore_table.assert_called_once_with(
+ request={"parent": INSTANCE_NAME, "table_id": TABLE_ID, "backup": BACKUP_NAME}
+ )
+
+
+def test_backup_restore_w_cluster_not_set():
+ client = _Client()
+ client.table_admin_client = _make_table_admin_client()
+ backup = _make_backup(
+ BACKUP_ID,
+ _Instance(INSTANCE_NAME, client=client),
+ table_id=TABLE_ID,
+ expire_time=_make_timestamp(),
+ )
+
+ with pytest.raises(ValueError):
+ backup.restore(TABLE_ID)
+
+
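+# exercises restore() against the default instance or an explicit alternate one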
+def _restore_helper(instance_id=None, instance_name=None):
+ op_future = object()
+ client = _Client()
+ api = client.table_admin_client = _make_table_admin_client()
+ api.restore_table.return_value = op_future
+
+ timestamp = _make_timestamp()
+ backup = _make_backup(
+ BACKUP_ID,
+ _Instance(INSTANCE_NAME, client=client),
+ cluster_id=CLUSTER_ID,
+ table_id=TABLE_NAME,
+ expire_time=timestamp,
+ )
+
+ future = backup.restore(TABLE_ID, instance_id)
+ assert backup._cluster == CLUSTER_ID
+ assert future is op_future
+
+ api.restore_table.assert_called_once_with(
+ request={
+ "parent": instance_name or INSTANCE_NAME,
+ "table_id": TABLE_ID,
+ "backup": BACKUP_NAME,
+ }
+ )
+ api.restore_table.reset_mock()
+
+
+def test_backup_restore_default():
+ _restore_helper()
+
+
+def test_backup_restore_to_another_instance():
+ _restore_helper(ALT_INSTANCE, ALT_INSTANCE_NAME)
+
+
+def test_backup_get_iam_policy():
+ from google.cloud.bigtable.client import Client
+ from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ BigtableTableAdminClient,
+ )
+ from google.iam.v1 import policy_pb2
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+
+ credentials = _make_credentials()
+ client = Client(project=PROJECT_ID, credentials=credentials, admin=True)
+
+ instance = client.instance(instance_id=INSTANCE_ID)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+
+ version = 1
+ etag = b"etag_v1"
+ members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
+ bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
+ iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
+
+ table_api = mock.create_autospec(BigtableTableAdminClient)
+ client._table_admin_client = table_api
+ table_api.get_iam_policy.return_value = iam_policy
+
+ result = backup.get_iam_policy()
+
+ table_api.get_iam_policy.assert_called_once_with(request={"resource": backup.name})
+ assert result.version == version
+ assert result.etag == etag
+
+ admins = result.bigtable_admins
+ assert len(admins) == len(members)
+ for found, expected in zip(sorted(admins), sorted(members)):
+ assert found == expected
+
+
+def test_backup_set_iam_policy():
+ from google.cloud.bigtable.client import Client
+ from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ BigtableTableAdminClient,
+ )
+ from google.iam.v1 import policy_pb2
+ from google.cloud.bigtable.policy import Policy
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+
+ credentials = _make_credentials()
+ client = Client(project=PROJECT_ID, credentials=credentials, admin=True)
+
+ instance = client.instance(instance_id=INSTANCE_ID)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+
+ version = 1
+ etag = b"etag_v1"
+ members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
+ bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
+ iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
+
+ table_api = mock.create_autospec(BigtableTableAdminClient)
+ client._table_admin_client = table_api
+ table_api.set_iam_policy.return_value = iam_policy_pb
+
+ iam_policy = Policy(etag=etag, version=version)
+ iam_policy[BIGTABLE_ADMIN_ROLE] = [
+ Policy.user("user1@test.com"),
+ Policy.service_account("service_acc1@test.com"),
+ ]
+
+ result = backup.set_iam_policy(iam_policy)
+
+ table_api.set_iam_policy.assert_called_once_with(
+ request={"resource": backup.name, "policy": iam_policy_pb}
+ )
+ assert result.version == version
+ assert result.etag == etag
+
+ admins = result.bigtable_admins
+ assert len(admins) == len(members)
+ for found, expected in zip(sorted(admins), sorted(members)):
+ assert found == expected
+
+
+def test_backup_test_iam_permissions():
+ from google.cloud.bigtable.client import Client
+ from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ BigtableTableAdminClient,
+ )
+ from google.iam.v1 import iam_policy_pb2
+
+ credentials = _make_credentials()
+ client = Client(project=PROJECT_ID, credentials=credentials, admin=True)
+
+ instance = client.instance(instance_id=INSTANCE_ID)
+ backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID)
+
+ permissions = ["bigtable.backups.create", "bigtable.backups.list"]
+
+ response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)
- permissions = ["bigtable.backups.create", "bigtable.backups.list"]
+ table_api = mock.create_autospec(BigtableTableAdminClient)
+ table_api.test_iam_permissions.return_value = response
+ client._table_admin_client = table_api
- response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)
-
- table_api = mock.create_autospec(BigtableTableAdminClient)
- table_api.test_iam_permissions.return_value = response
- client._table_admin_client = table_api
-
- result = backup.test_iam_permissions(permissions)
+ result = backup.test_iam_permissions(permissions)
- self.assertEqual(result, permissions)
- table_api.test_iam_permissions.assert_called_once_with(
- request={"resource": backup.name, "permissions": permissions}
- )
+ assert result == permissions
+ table_api.test_iam_permissions.assert_called_once_with(
+ request={"resource": backup.name, "permissions": permissions}
+ )
class _Client(object):
- def __init__(self, project=TestBackup.PROJECT_ID):
+ def __init__(self, project=PROJECT_ID):
self.project = project
self.project_name = "projects/" + self.project
diff --git a/tests/unit/test_batcher.py b/tests/unit/test_batcher.py
index 8760c3a2d..9ae6ed175 100644
--- a/tests/unit/test_batcher.py
+++ b/tests/unit/test_batcher.py
@@ -13,154 +13,135 @@
# limitations under the License.
-import unittest
-
import mock
+import pytest
-from ._testing import _make_credentials
-
-from google.cloud.bigtable.batcher import MutationsBatcher
from google.cloud.bigtable.row import DirectRow
+TABLE_ID = "table-id"
+TABLE_NAME = "/tables/" + TABLE_ID
-class TestMutationsBatcher(unittest.TestCase):
- from grpc import StatusCode
- TABLE_ID = "table-id"
- TABLE_NAME = "/tables/" + TABLE_ID
+def _make_mutation_batcher(table, **kw):
+ from google.cloud.bigtable.batcher import MutationsBatcher
- # RPC Status Codes
- SUCCESS = StatusCode.OK.value[0]
+ return MutationsBatcher(table, **kw)
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.table import Table
- return Table
+def test_mutation_batcher_constructor():
+ table = _Table(TABLE_NAME)
- def _make_table(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+ mutation_batcher = _make_mutation_batcher(table)
+ assert table is mutation_batcher.table
- @staticmethod
- def _get_target_client_class():
- from google.cloud.bigtable.client import Client
- return Client
+def test_mutation_batcher_mutate_rows():
+ table = _Table(TABLE_NAME)
+ mutation_batcher = _make_mutation_batcher(table=table)
- def _make_client(self, *args, **kwargs):
- return self._get_target_client_class()(*args, **kwargs)
+ rows = [
+ DirectRow(row_key=b"row_key"),
+ DirectRow(row_key=b"row_key_2"),
+ DirectRow(row_key=b"row_key_3"),
+ DirectRow(row_key=b"row_key_4"),
+ ]
- def test_constructor(self):
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
+ mutation_batcher.mutate_rows(rows)
+ mutation_batcher.flush()
- instance = client.instance(instance_id="instance-id")
- table = self._make_table(self.TABLE_ID, instance)
+ assert table.mutation_calls == 1
- mutation_batcher = MutationsBatcher(table)
- self.assertEqual(table, mutation_batcher.table)
- def test_mutate_row(self):
- table = _Table(self.TABLE_NAME)
- mutation_batcher = MutationsBatcher(table=table)
+def test_mutation_batcher_mutate():
+ table = _Table(TABLE_NAME)
+ mutation_batcher = _make_mutation_batcher(table=table)
- rows = [
- DirectRow(row_key=b"row_key"),
- DirectRow(row_key=b"row_key_2"),
- DirectRow(row_key=b"row_key_3"),
- DirectRow(row_key=b"row_key_4"),
- ]
+ row = DirectRow(row_key=b"row_key")
+ row.set_cell("cf1", b"c1", 1)
+ row.set_cell("cf1", b"c2", 2)
+ row.set_cell("cf1", b"c3", 3)
+ row.set_cell("cf1", b"c4", 4)
- mutation_batcher.mutate_rows(rows)
- mutation_batcher.flush()
+ mutation_batcher.mutate(row)
- self.assertEqual(table.mutation_calls, 1)
+ mutation_batcher.flush()
- def test_mutate_rows(self):
- table = _Table(self.TABLE_NAME)
- mutation_batcher = MutationsBatcher(table=table)
+ assert table.mutation_calls == 1
- row = DirectRow(row_key=b"row_key")
- row.set_cell("cf1", b"c1", 1)
- row.set_cell("cf1", b"c2", 2)
- row.set_cell("cf1", b"c3", 3)
- row.set_cell("cf1", b"c4", 4)
- mutation_batcher.mutate(row)
+def test_mutation_batcher_flush_w_no_rows():
+ table = _Table(TABLE_NAME)
+ mutation_batcher = _make_mutation_batcher(table=table)
+ mutation_batcher.flush()
- mutation_batcher.flush()
+ assert table.mutation_calls == 0
- self.assertEqual(table.mutation_calls, 1)
- def test_flush_with_no_rows(self):
- table = _Table(self.TABLE_NAME)
- mutation_batcher = MutationsBatcher(table=table)
- mutation_batcher.flush()
+def test_mutation_batcher_mutate_w_max_flush_count():
+ table = _Table(TABLE_NAME)
+ mutation_batcher = _make_mutation_batcher(table=table, flush_count=3)
- self.assertEqual(table.mutation_calls, 0)
+ row_1 = DirectRow(row_key=b"row_key_1")
+ row_2 = DirectRow(row_key=b"row_key_2")
+ row_3 = DirectRow(row_key=b"row_key_3")
- def test_add_row_with_max_flush_count(self):
- table = _Table(self.TABLE_NAME)
- mutation_batcher = MutationsBatcher(table=table, flush_count=3)
+ mutation_batcher.mutate(row_1)
+ mutation_batcher.mutate(row_2)
+ mutation_batcher.mutate(row_3)
- row_1 = DirectRow(row_key=b"row_key_1")
- row_2 = DirectRow(row_key=b"row_key_2")
- row_3 = DirectRow(row_key=b"row_key_3")
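+ # the third mutate() reaches flush_count=3, triggering exactly one automatic flush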
+ assert table.mutation_calls == 1
- mutation_batcher.mutate(row_1)
- mutation_batcher.mutate(row_2)
- mutation_batcher.mutate(row_3)
- self.assertEqual(table.mutation_calls, 1)
+@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3)
+def test_mutation_batcher_mutate_w_max_mutations_failure():
+ from google.cloud.bigtable.batcher import MaxMutationsError
- @mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3)
- def test_mutate_row_with_max_mutations_failure(self):
- from google.cloud.bigtable.batcher import MaxMutationsError
+ table = _Table(TABLE_NAME)
+ mutation_batcher = _make_mutation_batcher(table=table)
- table = _Table(self.TABLE_NAME)
- mutation_batcher = MutationsBatcher(table=table)
+ row = DirectRow(row_key=b"row_key")
+ row.set_cell("cf1", b"c1", 1)
+ row.set_cell("cf1", b"c2", 2)
+ row.set_cell("cf1", b"c3", 3)
+ row.set_cell("cf1", b"c4", 4)
- row = DirectRow(row_key=b"row_key")
- row.set_cell("cf1", b"c1", 1)
- row.set_cell("cf1", b"c2", 2)
- row.set_cell("cf1", b"c3", 3)
- row.set_cell("cf1", b"c4", 4)
+ with pytest.raises(MaxMutationsError):
+ mutation_batcher.mutate(row)
- with self.assertRaises(MaxMutationsError):
- mutation_batcher.mutate(row)
- @mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3)
- def test_mutate_row_with_max_mutations(self):
- table = _Table(self.TABLE_NAME)
- mutation_batcher = MutationsBatcher(table=table)
+@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3)
+def test_mutation_batcher_mutate_w_max_mutations():
+ table = _Table(TABLE_NAME)
+ mutation_batcher = _make_mutation_batcher(table=table)
- row = DirectRow(row_key=b"row_key")
- row.set_cell("cf1", b"c1", 1)
- row.set_cell("cf1", b"c2", 2)
- row.set_cell("cf1", b"c3", 3)
+ row = DirectRow(row_key=b"row_key")
+ row.set_cell("cf1", b"c1", 1)
+ row.set_cell("cf1", b"c2", 2)
+ row.set_cell("cf1", b"c3", 3)
- mutation_batcher.mutate(row)
- mutation_batcher.flush()
+ mutation_batcher.mutate(row)
+ mutation_batcher.flush()
- self.assertEqual(table.mutation_calls, 1)
+ assert table.mutation_calls == 1
- def test_mutate_row_with_max_row_bytes(self):
- table = _Table(self.TABLE_NAME)
- mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024)
- number_of_bytes = 1 * 1024 * 1024
- max_value = b"1" * number_of_bytes
+def test_mutation_batcher_mutate_w_max_row_bytes():
+ table = _Table(TABLE_NAME)
+ mutation_batcher = _make_mutation_batcher(
+ table=table, max_row_bytes=3 * 1024 * 1024
+ )
- row = DirectRow(row_key=b"row_key")
- row.set_cell("cf1", b"c1", max_value)
- row.set_cell("cf1", b"c2", max_value)
- row.set_cell("cf1", b"c3", max_value)
+ number_of_bytes = 1 * 1024 * 1024
+ max_value = b"1" * number_of_bytes
- mutation_batcher.mutate(row)
+ row = DirectRow(row_key=b"row_key")
+ row.set_cell("cf1", b"c1", max_value)
+ row.set_cell("cf1", b"c2", max_value)
+ row.set_cell("cf1", b"c3", max_value)
+
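+ # three 1 MiB cells reach the 3 MiB max_row_bytes limit, so mutate() flushes on its own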
+ mutation_batcher.mutate(row)
- self.assertEqual(table.mutation_calls, 1)
+ assert table.mutation_calls == 1
class _Instance(object):
diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py
index 5c557763a..00f8524bc 100644
--- a/tests/unit/test_client.py
+++ b/tests/unit/test_client.py
@@ -13,709 +13,747 @@
# limitations under the License.
-import unittest
-
import mock
+import pytest
from ._testing import _make_credentials
+PROJECT = "PROJECT"
+INSTANCE_ID = "instance-id"
+DISPLAY_NAME = "display-name"
+USER_AGENT = "you-sir-age-int"
+
+
+def _invoke_client_factory(client_class, **kw):
+ from google.cloud.bigtable.client import _create_gapic_client
+
+ return _create_gapic_client(client_class, **kw)
+
+
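+# _create_gapic_client returns a factory: calling it with a wrapped client builds the
+# GAPIC client from that client's info and transport, never from raw credentials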
+def test___create_gapic_client_wo_emulator():
+ client_class = mock.Mock()
+ credentials = _make_credentials()
+ client = _MockClient(credentials)
+ client_info = client._client_info = mock.Mock()
+ transport = mock.Mock()
+
+ result = _invoke_client_factory(client_class, transport=transport)(client)
+
+ assert result is client_class.return_value
+ client_class.assert_called_once_with(
+ credentials=None,
+ client_info=client_info,
+ client_options=None,
+ transport=transport,
+ )
+
+
+def test___create_gapic_client_wo_emulator_w_client_options():
+ client_class = mock.Mock()
+ credentials = _make_credentials()
+ client = _MockClient(credentials)
+ client_info = client._client_info = mock.Mock()
+ client_options = mock.Mock()
+ transport = mock.Mock()
+
+ result = _invoke_client_factory(
+ client_class, client_options=client_options, transport=transport
+ )(client)
+
+ assert result is client_class.return_value
+ client_class.assert_called_once_with(
+ credentials=None,
+ client_info=client_info,
+ client_options=client_options,
+ transport=transport,
+ )
+
+
+def test___create_gapic_client_w_emulator():
+ client_class = mock.Mock()
+ emulator_host = emulator_channel = object()
+ credentials = _make_credentials()
+ client_options = mock.Mock()
+ transport = mock.Mock()
+
+ client = _MockClient(
+ credentials, emulator_host=emulator_host, emulator_channel=emulator_channel
+ )
+ client_info = client._client_info = mock.Mock()
+ result = _invoke_client_factory(
+ client_class, client_options=client_options, transport=transport
+ )(client)
+
+ assert result is client_class.return_value
+ client_class.assert_called_once_with(
+ credentials=None,
+ client_info=client_info,
+ client_options=client_options,
+ transport=transport,
+ )
+
+
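+# minimal stand-in exposing only the attributes _create_gapic_client reads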
+class _MockClient(object):
+ def __init__(self, credentials, emulator_host=None, emulator_channel=None):
+ self._credentials = credentials
+ self._emulator_host = emulator_host
+ self._emulator_channel = emulator_channel
-class Test__create_gapic_client(unittest.TestCase):
- def _invoke_client_factory(self, client_class, **kw):
- from google.cloud.bigtable.client import _create_gapic_client
- return _create_gapic_client(client_class, **kw)
+def _make_client(*args, **kwargs):
+ from google.cloud.bigtable.client import Client
- def test_wo_emulator(self):
- client_class = mock.Mock()
- credentials = _make_credentials()
- client = _Client(credentials)
- client_info = client._client_info = mock.Mock()
- transport = mock.Mock()
+ return Client(*args, **kwargs)
- result = self._invoke_client_factory(client_class, transport=transport)(client)
- self.assertIs(result, client_class.return_value)
- client_class.assert_called_once_with(
- credentials=None,
- client_info=client_info,
- client_options=None,
- transport=transport,
- )
+@mock.patch("os.environ", {})
+def test_client_constructor_defaults():
+ from google.api_core import client_info
+ from google.cloud.bigtable import __version__
+ from google.cloud.bigtable.client import DATA_SCOPE
- def test_wo_emulator_w_client_options(self):
- client_class = mock.Mock()
- credentials = _make_credentials()
- client = _Client(credentials)
- client_info = client._client_info = mock.Mock()
- client_options = mock.Mock()
- transport = mock.Mock()
-
- result = self._invoke_client_factory(
- client_class, client_options=client_options, transport=transport
- )(client)
-
- self.assertIs(result, client_class.return_value)
- client_class.assert_called_once_with(
- credentials=None,
- client_info=client_info,
- client_options=client_options,
- transport=transport,
- )
+ credentials = _make_credentials()
- def test_w_emulator(self):
- client_class = mock.Mock()
- emulator_host = emulator_channel = object()
- credentials = _make_credentials()
- client_options = mock.Mock()
- transport = mock.Mock()
-
- client = _Client(
- credentials, emulator_host=emulator_host, emulator_channel=emulator_channel
- )
- client_info = client._client_info = mock.Mock()
- result = self._invoke_client_factory(
- client_class, client_options=client_options, transport=transport
- )(client)
-
- self.assertIs(result, client_class.return_value)
- client_class.assert_called_once_with(
- credentials=None,
- client_info=client_info,
- client_options=client_options,
- transport=transport,
- )
+ with mock.patch("google.auth.default") as mocked:
+ mocked.return_value = credentials, PROJECT
+ client = _make_client()
+ assert client.project == PROJECT
+ assert client._credentials is credentials.with_scopes.return_value
+ assert not client._read_only
+ assert not client._admin
+ assert isinstance(client._client_info, client_info.ClientInfo)
+ assert client._client_info.client_library_version == __version__
+ assert client._channel is None
+ assert client._emulator_host is None
+ assert client.SCOPE == (DATA_SCOPE,)
-class _Client(object):
- def __init__(self, credentials, emulator_host=None, emulator_channel=None):
- self._credentials = credentials
- self._emulator_host = emulator_host
- self._emulator_channel = emulator_channel
+def test_client_constructor_explicit():
+ import warnings
+ from google.cloud.bigtable.client import ADMIN_SCOPE
+ from google.cloud.bigtable.client import DATA_SCOPE
-class TestClient(unittest.TestCase):
-
- PROJECT = "PROJECT"
- INSTANCE_ID = "instance-id"
- DISPLAY_NAME = "display-name"
- USER_AGENT = "you-sir-age-int"
-
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.client import Client
-
- return Client
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- @mock.patch("os.environ", {})
- def test_constructor_defaults(self):
- from google.cloud.bigtable.client import _CLIENT_INFO
- from google.cloud.bigtable.client import DATA_SCOPE
-
- credentials = _make_credentials()
-
- with mock.patch("google.auth.default") as mocked:
- mocked.return_value = credentials, self.PROJECT
- client = self._make_one()
-
- self.assertEqual(client.project, self.PROJECT)
- self.assertIs(client._credentials, credentials.with_scopes.return_value)
- self.assertFalse(client._read_only)
- self.assertFalse(client._admin)
- self.assertIs(client._client_info, _CLIENT_INFO)
- self.assertIsNone(client._channel)
- self.assertIsNone(client._emulator_host)
- self.assertEqual(client.SCOPE, (DATA_SCOPE,))
-
- def test_constructor_explicit(self):
- import warnings
- from google.cloud.bigtable.client import ADMIN_SCOPE
- from google.cloud.bigtable.client import DATA_SCOPE
-
- credentials = _make_credentials()
- client_info = mock.Mock()
-
- with warnings.catch_warnings(record=True) as warned:
- client = self._make_one(
- project=self.PROJECT,
- credentials=credentials,
- read_only=False,
- admin=True,
- client_info=client_info,
- channel=mock.sentinel.channel,
- )
-
- self.assertEqual(len(warned), 1)
-
- self.assertEqual(client.project, self.PROJECT)
- self.assertIs(client._credentials, credentials.with_scopes.return_value)
- self.assertFalse(client._read_only)
- self.assertTrue(client._admin)
- self.assertIs(client._client_info, client_info)
- self.assertIs(client._channel, mock.sentinel.channel)
- self.assertEqual(client.SCOPE, (DATA_SCOPE, ADMIN_SCOPE))
-
- def test_constructor_both_admin_and_read_only(self):
- credentials = _make_credentials()
- with self.assertRaises(ValueError):
- self._make_one(
- project=self.PROJECT,
- credentials=credentials,
- admin=True,
- read_only=True,
- )
-
- def test_constructor_with_emulator_host(self):
- from google.cloud.environment_vars import BIGTABLE_EMULATOR
- from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS
-
- credentials = _make_credentials()
- emulator_host = "localhost:8081"
- with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}):
- with mock.patch("grpc.secure_channel") as factory:
- client = self._make_one(project=self.PROJECT, credentials=credentials)
- # don't test local_composite_credentials
- client._local_composite_credentials = lambda: credentials
- # channels are formed when needed, so access a client
- # create a gapic channel
- client.table_data_client
-
- self.assertEqual(client._emulator_host, emulator_host)
- factory.assert_called_once_with(
- emulator_host, credentials, options=_GRPC_CHANNEL_OPTIONS,
- )
+ credentials = _make_credentials()
+ client_info = mock.Mock()
+
+ with warnings.catch_warnings(record=True) as warned:
+ client = _make_client(
+ project=PROJECT,
+ credentials=credentials,
+ read_only=False,
+ admin=True,
+ client_info=client_info,
+ channel=mock.sentinel.channel,
+ )
+
+ assert len(warned) == 1
+
+ assert client.project == PROJECT
+ assert client._credentials is credentials.with_scopes.return_value
+ assert not client._read_only
+ assert client._admin
+ assert client._client_info is client_info
+ assert client._channel is mock.sentinel.channel
+ assert client.SCOPE == (DATA_SCOPE, ADMIN_SCOPE)
- def test__get_scopes_default(self):
- from google.cloud.bigtable.client import DATA_SCOPE
- client = self._make_one(project=self.PROJECT, credentials=_make_credentials())
- self.assertEqual(client._get_scopes(), (DATA_SCOPE,))
+def test_client_constructor_w_both_admin_and_read_only():
+ credentials = _make_credentials()
+ with pytest.raises(ValueError):
+ _make_client(
+ project=PROJECT, credentials=credentials, admin=True, read_only=True,
+ )
+
+
+def test_client_constructor_w_emulator_host():
+ from google.cloud.environment_vars import BIGTABLE_EMULATOR
+ from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+ from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS
+
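+ # with BIGTABLE_EMULATOR set, the client targets the emulator and falls back to a default project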
+ emulator_host = "localhost:8081"
+ with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}):
+ with mock.patch("grpc.secure_channel") as factory:
+ client = _make_client()
+ # creds wrapping is covered by test_client__local_composite_credentials below
+ # channels are formed lazily, so access table_data_client to create a gapic channel
+ client.table_data_client
+
+ assert client._emulator_host == emulator_host
+ assert client.project == _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+ factory.assert_called_once_with(
+ emulator_host,
+ mock.ANY, # creds wrapping is tested in the _emulator_channel tests below
+ options=_GRPC_CHANNEL_OPTIONS,
+ )
+
+
+def test_client_constructor_w_emulator_host_w_project():
+ from google.cloud.environment_vars import BIGTABLE_EMULATOR
+ from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS
+
+ emulator_host = "localhost:8081"
+ with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}):
+ with mock.patch("grpc.secure_channel") as factory:
+ client = _make_client(project=PROJECT)
+ # channels are formed lazily, so access table_data_client to create a gapic channel
+ client.table_data_client
+
+ assert client._emulator_host == emulator_host
+ assert client.project == PROJECT
+ factory.assert_called_once_with(
+ emulator_host,
+ mock.ANY, # creds wrapping is tested in the _emulator_channel tests below
+ options=_GRPC_CHANNEL_OPTIONS,
+ )
+
+
+def test_client_constructor_w_emulator_host_w_credentials():
+ from google.cloud.environment_vars import BIGTABLE_EMULATOR
+ from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+ from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS
+
+ emulator_host = "localhost:8081"
+ credentials = _make_credentials()
+ with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}):
+ with mock.patch("grpc.secure_channel") as factory:
+ client = _make_client(credentials=credentials)
+ # channels are formed lazily, so access table_data_client to create a gapic channel
+ client.table_data_client
+
+ assert client._emulator_host == emulator_host
+ assert client.project == _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+ factory.assert_called_once_with(
+ emulator_host,
+ mock.ANY, # creds wrapping is tested in the _emulator_channel tests below
+ options=_GRPC_CHANNEL_OPTIONS,
+ )
+
+
+def test_client__get_scopes_default():
+ from google.cloud.bigtable.client import DATA_SCOPE
+
+ client = _make_client(project=PROJECT, credentials=_make_credentials())
+ assert client._get_scopes() == (DATA_SCOPE,)
+
+
+def test_client__get_scopes_w_admin():
+ from google.cloud.bigtable.client import ADMIN_SCOPE
+ from google.cloud.bigtable.client import DATA_SCOPE
+
+ client = _make_client(project=PROJECT, credentials=_make_credentials(), admin=True)
+ expected_scopes = (DATA_SCOPE, ADMIN_SCOPE)
+ assert client._get_scopes() == expected_scopes
+
+
+def test_client__get_scopes_w_read_only():
+ from google.cloud.bigtable.client import READ_ONLY_SCOPE
+
+ client = _make_client(
+ project=PROJECT, credentials=_make_credentials(), read_only=True
+ )
+ assert client._get_scopes() == (READ_ONLY_SCOPE,)
+
+
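+# _emulator_channel selects the sync or asyncio secure_channel based on the transport name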
+def test_client__emulator_channel_w_sync():
+ emulator_host = "localhost:8081"
+ transport_name = "GrpcTransportTesting"
+ transport = mock.Mock(spec=["__name__"], __name__=transport_name)
+ options = mock.Mock(spec=[])
+ client = _make_client(
+ project=PROJECT, credentials=_make_credentials(), read_only=True
+ )
+ client._emulator_host = emulator_host
+ lcc = client._local_composite_credentials = mock.Mock(spec=[])
+
+ with mock.patch("grpc.secure_channel") as patched:
+ channel = client._emulator_channel(transport, options)
+
+ assert channel is patched.return_value
+ patched.assert_called_once_with(
+ emulator_host, lcc.return_value, options=options,
+ )
+
+
+def test_client__emulator_channel_w_async():
+ emulator_host = "localhost:8081"
+ transport_name = "GrpcAsyncIOTransportTesting"
+ transport = mock.Mock(spec=["__name__"], __name__=transport_name)
+ options = mock.Mock(spec=[])
+ client = _make_client(
+ project=PROJECT, credentials=_make_credentials(), read_only=True
+ )
+ client._emulator_host = emulator_host
+ lcc = client._local_composite_credentials = mock.Mock(spec=[])
+
+ with mock.patch("grpc.aio.secure_channel") as patched:
+ channel = client._emulator_channel(transport, options)
+
+ assert channel is patched.return_value
+ patched.assert_called_once_with(
+ emulator_host, lcc.return_value, options=options,
+ )
+
+
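+# the emulator credentials compose grpc local channel credentials with call
+# credentials derived from the client's scoped credentials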
+def test_client__local_composite_credentials():
+ client = _make_client(
+ project=PROJECT, credentials=_make_credentials(), read_only=True
+ )
+
+ wsir_patch = mock.patch("google.auth.credentials.with_scopes_if_required")
+ request_patch = mock.patch("google.auth.transport.requests.Request")
+ amp_patch = mock.patch("google.auth.transport.grpc.AuthMetadataPlugin")
+ grpc_patches = mock.patch.multiple(
+ "grpc",
+ metadata_call_credentials=mock.DEFAULT,
+ local_channel_credentials=mock.DEFAULT,
+ composite_channel_credentials=mock.DEFAULT,
+ )
+ with wsir_patch as wsir_patched:
+ with request_patch as request_patched:
+ with amp_patch as amp_patched:
+ with grpc_patches as grpc_patched:
+ credentials = client._local_composite_credentials()
+
+ grpc_mcc = grpc_patched["metadata_call_credentials"]
+ grpc_lcc = grpc_patched["local_channel_credentials"]
+ grpc_ccc = grpc_patched["composite_channel_credentials"]
+
+ assert credentials is grpc_ccc.return_value
+
+ wsir_patched.assert_called_once_with(client._credentials, None)
+ request_patched.assert_called_once_with()
+ amp_patched.assert_called_once_with(
+ wsir_patched.return_value, request_patched.return_value,
+ )
+ grpc_mcc.assert_called_once_with(amp_patched.return_value)
+ grpc_lcc.assert_called_once_with()
+ grpc_ccc.assert_called_once_with(grpc_lcc.return_value, grpc_mcc.return_value)
+
+
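+# helper covering _create_gapic_client_channel with the default endpoint, an
+# explicit api_endpoint, and an emulator host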
+def _create_gapic_client_channel_helper(endpoint=None, emulator_host=None):
+ from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS
+
+ client_class = mock.Mock(spec=["DEFAULT_ENDPOINT"])
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials)
+
+ if endpoint is not None:
+ client._client_options = mock.Mock(
+ spec=["api_endpoint"], api_endpoint=endpoint,
+ )
+ expected_host = endpoint
+ else:
+ expected_host = client_class.DEFAULT_ENDPOINT
+
+ if emulator_host is not None:
+ client._emulator_host = emulator_host
+ client._emulator_channel = mock.Mock(spec=[])
+ expected_host = emulator_host
- def test__get_scopes_admin(self):
- from google.cloud.bigtable.client import ADMIN_SCOPE
- from google.cloud.bigtable.client import DATA_SCOPE
+ grpc_transport = mock.Mock(spec=["create_channel"])
- client = self._make_one(
- project=self.PROJECT, credentials=_make_credentials(), admin=True
- )
- expected_scopes = (DATA_SCOPE, ADMIN_SCOPE)
- self.assertEqual(client._get_scopes(), expected_scopes)
+ transport = client._create_gapic_client_channel(client_class, grpc_transport)
- def test__get_scopes_read_only(self):
- from google.cloud.bigtable.client import READ_ONLY_SCOPE
+ assert transport is grpc_transport.return_value
- client = self._make_one(
- project=self.PROJECT, credentials=_make_credentials(), read_only=True
+ if emulator_host is not None:
+ client._emulator_channel.assert_called_once_with(
+ transport=grpc_transport, options=_GRPC_CHANNEL_OPTIONS,
)
- self.assertEqual(client._get_scopes(), (READ_ONLY_SCOPE,))
-
- def test__emulator_channel_sync(self):
- emulator_host = "localhost:8081"
- transport_name = "GrpcTransportTesting"
- transport = mock.Mock(spec=["__name__"], __name__=transport_name)
- options = mock.Mock(spec=[])
- client = self._make_one(
- project=self.PROJECT, credentials=_make_credentials(), read_only=True
+ grpc_transport.assert_called_once_with(
+ channel=client._emulator_channel.return_value, host=expected_host,
+ )
+ else:
+ grpc_transport.create_channel.assert_called_once_with(
+ host=expected_host,
+ credentials=client._credentials,
+ options=_GRPC_CHANNEL_OPTIONS,
+ )
+ grpc_transport.assert_called_once_with(
+ channel=grpc_transport.create_channel.return_value, host=expected_host,
)
- client._emulator_host = emulator_host
- lcc = client._local_composite_credentials = mock.Mock(spec=[])
- with mock.patch("grpc.secure_channel") as patched:
- channel = client._emulator_channel(transport, options)
- assert channel is patched.return_value
- patched.assert_called_once_with(
- emulator_host, lcc.return_value, options=options,
- )
+def test_client__create_gapic_client_channel_w_defaults():
+ _create_gapic_client_channel_helper()
- def test__emulator_channel_async(self):
- emulator_host = "localhost:8081"
- transport_name = "GrpcAsyncIOTransportTesting"
- transport = mock.Mock(spec=["__name__"], __name__=transport_name)
- options = mock.Mock(spec=[])
- client = self._make_one(
- project=self.PROJECT, credentials=_make_credentials(), read_only=True
- )
- client._emulator_host = emulator_host
- lcc = client._local_composite_credentials = mock.Mock(spec=[])
- with mock.patch("grpc.aio.secure_channel") as patched:
- channel = client._emulator_channel(transport, options)
+def test_client__create_gapic_client_channel_w_endpoint():
+ endpoint = "api.example.com"
+ _create_gapic_client_channel_helper(endpoint=endpoint)
- assert channel is patched.return_value
- patched.assert_called_once_with(
- emulator_host, lcc.return_value, options=options,
- )
- def test__local_composite_credentials(self):
- client = self._make_one(
- project=self.PROJECT, credentials=_make_credentials(), read_only=True
- )
+def test_client__create_gapic_client_channel_w_emulator_host():
+ host = "api.example.com:1234"
+ _create_gapic_client_channel_helper(emulator_host=host)
- wsir_patch = mock.patch("google.auth.credentials.with_scopes_if_required")
- request_patch = mock.patch("google.auth.transport.requests.Request")
- amp_patch = mock.patch("google.auth.transport.grpc.AuthMetadataPlugin")
- grpc_patches = mock.patch.multiple(
- "grpc",
- metadata_call_credentials=mock.DEFAULT,
- local_channel_credentials=mock.DEFAULT,
- composite_channel_credentials=mock.DEFAULT,
- )
- with wsir_patch as wsir_patched:
- with request_patch as request_patched:
- with amp_patch as amp_patched:
- with grpc_patches as grpc_patched:
- credentials = client._local_composite_credentials()
-
- grpc_mcc = grpc_patched["metadata_call_credentials"]
- grpc_lcc = grpc_patched["local_channel_credentials"]
- grpc_ccc = grpc_patched["composite_channel_credentials"]
-
- self.assertIs(credentials, grpc_ccc.return_value)
-
- wsir_patched.assert_called_once_with(client._credentials, None)
- request_patched.assert_called_once_with()
- amp_patched.assert_called_once_with(
- wsir_patched.return_value, request_patched.return_value,
- )
- grpc_mcc.assert_called_once_with(amp_patched.return_value)
- grpc_lcc.assert_called_once_with()
- grpc_ccc.assert_called_once_with(grpc_lcc.return_value, grpc_mcc.return_value)
-
- def _create_gapic_client_channel_helper(
- self, endpoint=None, emulator_host=None,
- ):
- from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS
-
- client_class = mock.Mock(spec=["DEFAULT_ENDPOINT"])
- credentials = _make_credentials()
- client = self._make_one(project=self.PROJECT, credentials=credentials)
-
- if endpoint is not None:
- client._client_options = mock.Mock(
- spec=["api_endpoint"], api_endpoint=endpoint,
- )
- expected_host = endpoint
- else:
- expected_host = client_class.DEFAULT_ENDPOINT
-
- if emulator_host is not None:
- client._emulator_host = emulator_host
- client._emulator_channel = mock.Mock(spec=[])
- expected_host = emulator_host
-
- grpc_transport = mock.Mock(spec=["create_channel"])
-
- transport = client._create_gapic_client_channel(client_class, grpc_transport)
-
- self.assertIs(transport, grpc_transport.return_value)
-
- if emulator_host is not None:
- client._emulator_channel.assert_called_once_with(
- transport=grpc_transport, options=_GRPC_CHANNEL_OPTIONS,
- )
- grpc_transport.assert_called_once_with(
- channel=client._emulator_channel.return_value, host=expected_host,
- )
- else:
- grpc_transport.create_channel.assert_called_once_with(
- host=expected_host,
- credentials=client._credentials,
- options=_GRPC_CHANNEL_OPTIONS,
- )
- grpc_transport.assert_called_once_with(
- channel=grpc_transport.create_channel.return_value, host=expected_host,
- )
-
- def test__create_gapic_client_channel_w_defaults(self):
- self._create_gapic_client_channel_helper()
-
- def test__create_gapic_client_channel_w_endpoint(self):
- endpoint = "api.example.com"
- self._create_gapic_client_channel_helper(endpoint=endpoint)
-
- def test__create_gapic_client_channel_w_emulator_host(self):
- host = "api.example.com:1234"
- self._create_gapic_client_channel_helper(emulator_host=host)
-
- def test__create_gapic_client_channel_w_endpoint_w_emulator_host(self):
- endpoint = "api.example.com"
- host = "other.example.com:1234"
- self._create_gapic_client_channel_helper(endpoint=endpoint, emulator_host=host)
-
- def test_project_path_property(self):
- credentials = _make_credentials()
- project = "PROJECT"
- client = self._make_one(project=project, credentials=credentials, admin=True)
- project_name = "projects/" + project
- self.assertEqual(client.project_path, project_name)
-
- def test_table_data_client_not_initialized(self):
- from google.cloud.bigtable.client import _CLIENT_INFO
- from google.cloud.bigtable_v2 import BigtableClient
-
- credentials = _make_credentials()
- client = self._make_one(project=self.PROJECT, credentials=credentials)
- table_data_client = client.table_data_client
- self.assertIsInstance(table_data_client, BigtableClient)
- self.assertIs(client._client_info, _CLIENT_INFO)
- self.assertIs(client._table_data_client, table_data_client)
+def test_client__create_gapic_client_channel_w_endpoint_w_emulator_host():
+ endpoint = "api.example.com"
+ host = "other.example.com:1234"
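+ # Both supplied: the emulator host should win over the explicit endpoint.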
+ _create_gapic_client_channel_helper(endpoint=endpoint, emulator_host=host)
- def test_table_data_client_not_initialized_w_client_info(self):
- from google.cloud.bigtable_v2 import BigtableClient
- credentials = _make_credentials()
- client_info = mock.Mock()
- client = self._make_one(
- project=self.PROJECT, credentials=credentials, client_info=client_info
- )
+def test_client_project_path():
+ credentials = _make_credentials()
+ project = "PROJECT"
+ client = _make_client(project=project, credentials=credentials, admin=True)
+ project_name = "projects/" + project
+ assert client.project_path == project_name
- table_data_client = client.table_data_client
- self.assertIsInstance(table_data_client, BigtableClient)
- self.assertIs(client._client_info, client_info)
- self.assertIs(client._table_data_client, table_data_client)
- def test_table_data_client_not_initialized_w_client_options(self):
- from google.api_core.client_options import ClientOptions
+def test_client_table_data_client_not_initialized():
+ from google.cloud.bigtable_v2 import BigtableClient
- credentials = _make_credentials()
- client_options = ClientOptions(
- quota_project_id="QUOTA-PROJECT", api_endpoint="xyz"
- )
- client = self._make_one(
- project=self.PROJECT, credentials=credentials, client_options=client_options
- )
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials)
- patch = mock.patch("google.cloud.bigtable_v2.BigtableClient")
- with patch as mocked:
- table_data_client = client.table_data_client
+ table_data_client = client.table_data_client
+ assert isinstance(table_data_client, BigtableClient)
+ assert client._table_data_client is table_data_client
- self.assertIs(table_data_client, mocked.return_value)
- self.assertIs(client._table_data_client, table_data_client)
- mocked.assert_called_once_with(
- client_info=client._client_info,
- credentials=None,
- transport=mock.ANY,
- client_options=client_options,
- )
+def test_client_table_data_client_not_initialized_w_client_info():
+ from google.cloud.bigtable_v2 import BigtableClient
- def test_table_data_client_initialized(self):
- credentials = _make_credentials()
- client = self._make_one(
- project=self.PROJECT, credentials=credentials, admin=True
- )
+ credentials = _make_credentials()
+ client_info = mock.Mock()
+ client = _make_client(
+ project=PROJECT, credentials=credentials, client_info=client_info
+ )
- already = client._table_data_client = object()
- self.assertIs(client.table_data_client, already)
+ table_data_client = client.table_data_client
+ assert isinstance(table_data_client, BigtableClient)
+ assert client._client_info is client_info
+ assert client._table_data_client is table_data_client
- def test_table_admin_client_not_initialized_no_admin_flag(self):
- credentials = _make_credentials()
- client = self._make_one(project=self.PROJECT, credentials=credentials)
- with self.assertRaises(ValueError):
- client.table_admin_client()
+def test_client_table_data_client_not_initialized_w_client_options():
+ from google.api_core.client_options import ClientOptions
- def test_table_admin_client_not_initialized_w_admin_flag(self):
- from google.cloud.bigtable.client import _CLIENT_INFO
- from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
+ credentials = _make_credentials()
+ client_options = ClientOptions(quota_project_id="QUOTA-PROJECT", api_endpoint="xyz")
+ client = _make_client(
+ project=PROJECT, credentials=credentials, client_options=client_options
+ )
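+ # Patch the GAPIC class itself so the test can assert on the exact
+ # constructor kwargs; client_options must be forwarded verbatim.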
- credentials = _make_credentials()
- client = self._make_one(
- project=self.PROJECT, credentials=credentials, admin=True
- )
+ patch = mock.patch("google.cloud.bigtable_v2.BigtableClient")
+ with patch as mocked:
+ table_data_client = client.table_data_client
- table_admin_client = client.table_admin_client
- self.assertIsInstance(table_admin_client, BigtableTableAdminClient)
- self.assertIs(client._client_info, _CLIENT_INFO)
- self.assertIs(client._table_admin_client, table_admin_client)
+ assert table_data_client is mocked.return_value
+ assert client._table_data_client is table_data_client
- def test_table_admin_client_not_initialized_w_client_info(self):
- from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
+ mocked.assert_called_once_with(
+ client_info=client._client_info,
+ credentials=None,
+ transport=mock.ANY,
+ client_options=client_options,
+ )
- credentials = _make_credentials()
- client_info = mock.Mock()
- client = self._make_one(
- project=self.PROJECT,
- credentials=credentials,
- admin=True,
- client_info=client_info,
- )
- table_admin_client = client.table_admin_client
- self.assertIsInstance(table_admin_client, BigtableTableAdminClient)
- self.assertIs(client._client_info, client_info)
- self.assertIs(client._table_admin_client, table_admin_client)
-
- def test_table_admin_client_not_initialized_w_client_options(self):
- credentials = _make_credentials()
- admin_client_options = mock.Mock()
- client = self._make_one(
- project=self.PROJECT,
- credentials=credentials,
- admin=True,
- admin_client_options=admin_client_options,
- )
+def test_client_table_data_client_initialized():
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
- client._create_gapic_client_channel = mock.Mock()
- patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableTableAdminClient")
- with patch as mocked:
- table_admin_client = client.table_admin_client
-
- self.assertIs(table_admin_client, mocked.return_value)
- self.assertIs(client._table_admin_client, table_admin_client)
- mocked.assert_called_once_with(
- client_info=client._client_info,
- credentials=None,
- transport=mock.ANY,
- client_options=admin_client_options,
- )
+ already = client._table_data_client = object()
+ assert client.table_data_client is already
- def test_table_admin_client_initialized(self):
- credentials = _make_credentials()
- client = self._make_one(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- already = client._table_admin_client = object()
- self.assertIs(client.table_admin_client, already)
+def test_client_table_admin_client_not_initialized_no_admin_flag():
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials)
- def test_instance_admin_client_not_initialized_no_admin_flag(self):
- credentials = _make_credentials()
- client = self._make_one(project=self.PROJECT, credentials=credentials)
+ with pytest.raises(ValueError):
+ client.table_admin_client()
- with self.assertRaises(ValueError):
- client.instance_admin_client()
- def test_instance_admin_client_not_initialized_w_admin_flag(self):
- from google.cloud.bigtable.client import _CLIENT_INFO
- from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient
+def test_client_table_admin_client_not_initialized_w_admin_flag():
+ from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
- credentials = _make_credentials()
- client = self._make_one(
- project=self.PROJECT, credentials=credentials, admin=True
- )
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
- instance_admin_client = client.instance_admin_client
- self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient)
- self.assertIs(client._client_info, _CLIENT_INFO)
- self.assertIs(client._instance_admin_client, instance_admin_client)
+ table_admin_client = client.table_admin_client
+ assert isinstance(table_admin_client, BigtableTableAdminClient)
+ assert client._table_admin_client is table_admin_client
- def test_instance_admin_client_not_initialized_w_client_info(self):
- from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient
- credentials = _make_credentials()
- client_info = mock.Mock()
- client = self._make_one(
- project=self.PROJECT,
- credentials=credentials,
- admin=True,
- client_info=client_info,
- )
+def test_client_table_admin_client_not_initialized_w_client_info():
+ from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
- instance_admin_client = client.instance_admin_client
- self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient)
- self.assertIs(client._client_info, client_info)
- self.assertIs(client._instance_admin_client, instance_admin_client)
-
- def test_instance_admin_client_not_initialized_w_client_options(self):
- credentials = _make_credentials()
- admin_client_options = mock.Mock()
- client = self._make_one(
- project=self.PROJECT,
- credentials=credentials,
- admin=True,
- admin_client_options=admin_client_options,
- )
+ credentials = _make_credentials()
+ client_info = mock.Mock()
+ client = _make_client(
+ project=PROJECT, credentials=credentials, admin=True, client_info=client_info,
+ )
- client._create_gapic_client_channel = mock.Mock()
- patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient")
- with patch as mocked:
- instance_admin_client = client.instance_admin_client
-
- self.assertIs(instance_admin_client, mocked.return_value)
- self.assertIs(client._instance_admin_client, instance_admin_client)
- mocked.assert_called_once_with(
- client_info=client._client_info,
- credentials=None,
- transport=mock.ANY,
- client_options=admin_client_options,
- )
+ table_admin_client = client.table_admin_client
+ assert isinstance(table_admin_client, BigtableTableAdminClient)
+ assert client._client_info is client_info
+ assert client._table_admin_client is table_admin_client
- def test_instance_admin_client_initialized(self):
- credentials = _make_credentials()
- client = self._make_one(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- already = client._instance_admin_client = object()
- self.assertIs(client.instance_admin_client, already)
-
- def test_instance_factory_defaults(self):
- from google.cloud.bigtable.instance import Instance
-
- PROJECT = "PROJECT"
- INSTANCE_ID = "instance-id"
- credentials = _make_credentials()
- client = self._make_one(project=PROJECT, credentials=credentials)
-
- instance = client.instance(INSTANCE_ID)
-
- self.assertIsInstance(instance, Instance)
- self.assertEqual(instance.instance_id, INSTANCE_ID)
- self.assertEqual(instance.display_name, INSTANCE_ID)
- self.assertIsNone(instance.type_)
- self.assertIsNone(instance.labels)
- self.assertIs(instance._client, client)
-
- def test_instance_factory_non_defaults(self):
- from google.cloud.bigtable.instance import Instance
- from google.cloud.bigtable import enums
-
- PROJECT = "PROJECT"
- INSTANCE_ID = "instance-id"
- DISPLAY_NAME = "display-name"
- instance_type = enums.Instance.Type.DEVELOPMENT
- labels = {"foo": "bar"}
- credentials = _make_credentials()
- client = self._make_one(project=PROJECT, credentials=credentials)
-
- instance = client.instance(
- INSTANCE_ID,
- display_name=DISPLAY_NAME,
- instance_type=instance_type,
- labels=labels,
- )
+def test_client_table_admin_client_not_initialized_w_client_options():
+ credentials = _make_credentials()
+ admin_client_options = mock.Mock()
+ client = _make_client(
+ project=PROJECT,
+ credentials=credentials,
+ admin=True,
+ admin_client_options=admin_client_options,
+ )
- self.assertIsInstance(instance, Instance)
- self.assertEqual(instance.instance_id, INSTANCE_ID)
- self.assertEqual(instance.display_name, DISPLAY_NAME)
- self.assertEqual(instance.type_, instance_type)
- self.assertEqual(instance.labels, labels)
- self.assertIs(instance._client, client)
-
- def test_list_instances(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_instance_admin as messages_v2_pb2,
- )
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable.instance import Instance
+ client._create_gapic_client_channel = mock.Mock()
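+ # Channel creation is stubbed out above so constructing the admin client
+ # never opens a real gRPC channel; only constructor kwargs are checked.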
+ patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableTableAdminClient")
+ with patch as mocked:
+ table_admin_client = client.table_admin_client
- FAILED_LOCATION = "FAILED"
- INSTANCE_ID1 = "instance-id1"
- INSTANCE_ID2 = "instance-id2"
- INSTANCE_NAME1 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID1
- INSTANCE_NAME2 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID2
+ assert table_admin_client is mocked.return_value
+ assert client._table_admin_client is table_admin_client
+ mocked.assert_called_once_with(
+ client_info=client._client_info,
+ credentials=None,
+ transport=mock.ANY,
+ client_options=admin_client_options,
+ )
- api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = self._make_one(
- project=self.PROJECT, credentials=credentials, admin=True
- )
+def test_client_table_admin_client_initialized():
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
- # Create response_pb
- response_pb = messages_v2_pb2.ListInstancesResponse(
- failed_locations=[FAILED_LOCATION],
- instances=[
- data_v2_pb2.Instance(name=INSTANCE_NAME1, display_name=INSTANCE_NAME1),
- data_v2_pb2.Instance(name=INSTANCE_NAME2, display_name=INSTANCE_NAME2),
- ],
- )
+ already = client._table_admin_client = object()
+ assert client.table_admin_client is already
- # Patch the stub used by the API method.
- client._instance_admin_client = api
- instance_stub = client._instance_admin_client
- instance_stub.list_instances.side_effect = [response_pb]
+def test_client_instance_admin_client_not_initialized_no_admin_flag():
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials)
- # Perform the method and check the result.
- instances, failed_locations = client.list_instances()
+ with pytest.raises(ValueError):
+ client.instance_admin_client()
- instance_1, instance_2 = instances
- self.assertIsInstance(instance_1, Instance)
- self.assertEqual(instance_1.instance_id, INSTANCE_ID1)
- self.assertTrue(instance_1._client is client)
+def test_client_instance_admin_client_not_initialized_w_admin_flag():
+ from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient
- self.assertIsInstance(instance_2, Instance)
- self.assertEqual(instance_2.instance_id, INSTANCE_ID2)
- self.assertTrue(instance_2._client is client)
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
- self.assertEqual(failed_locations, [FAILED_LOCATION])
+ instance_admin_client = client.instance_admin_client
+ assert isinstance(instance_admin_client, BigtableInstanceAdminClient)
+ assert client._instance_admin_client is instance_admin_client
- def test_list_clusters(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_instance_admin as messages_v2_pb2,
- )
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable.instance import Cluster
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
+def test_client_instance_admin_client_not_initialized_w_client_info():
+ from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient
- credentials = _make_credentials()
- client = self._make_one(
- project=self.PROJECT, credentials=credentials, admin=True
- )
+ credentials = _make_credentials()
+ client_info = mock.Mock()
+ client = _make_client(
+ project=PROJECT, credentials=credentials, admin=True, client_info=client_info,
+ )
- INSTANCE_ID1 = "instance-id1"
- INSTANCE_ID2 = "instance-id2"
+ instance_admin_client = client.instance_admin_client
+ assert isinstance(instance_admin_client, BigtableInstanceAdminClient)
+ assert client._client_info is client_info
+ assert client._instance_admin_client is instance_admin_client
- failed_location = "FAILED"
- cluster_id1 = "{}-cluster".format(INSTANCE_ID1)
- cluster_id2 = "{}-cluster-1".format(INSTANCE_ID2)
- cluster_id3 = "{}-cluster-2".format(INSTANCE_ID2)
- cluster_name1 = client.instance_admin_client.cluster_path(
- self.PROJECT, INSTANCE_ID1, cluster_id1
- )
- cluster_name2 = client.instance_admin_client.cluster_path(
- self.PROJECT, INSTANCE_ID2, cluster_id2
- )
- cluster_name3 = client.instance_admin_client.cluster_path(
- self.PROJECT, INSTANCE_ID2, cluster_id3
- )
- # Create response_pb
- response_pb = messages_v2_pb2.ListClustersResponse(
- failed_locations=[failed_location],
- clusters=[
- data_v2_pb2.Cluster(name=cluster_name1),
- data_v2_pb2.Cluster(name=cluster_name2),
- data_v2_pb2.Cluster(name=cluster_name3),
- ],
- )
+def test_client_instance_admin_client_not_initialized_w_client_options():
+ credentials = _make_credentials()
+ admin_client_options = mock.Mock()
+ client = _make_client(
+ project=PROJECT,
+ credentials=credentials,
+ admin=True,
+ admin_client_options=admin_client_options,
+ )
+
+ client._create_gapic_client_channel = mock.Mock()
+ patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient")
+ with patch as mocked:
+ instance_admin_client = client.instance_admin_client
+
+ assert instance_admin_client is mocked.return_value
+ assert client._instance_admin_client is instance_admin_client
+ mocked.assert_called_once_with(
+ client_info=client._client_info,
+ credentials=None,
+ transport=mock.ANY,
+ client_options=admin_client_options,
+ )
+
+
+def test_client_instance_admin_client_initialized():
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
- # Patch the stub used by the API method.
- client._instance_admin_client = instance_api
- instance_stub = client._instance_admin_client
+ already = client._instance_admin_client = object()
+ assert client.instance_admin_client is already
- instance_stub.list_clusters.side_effect = [response_pb]
- # Perform the method and check the result.
- clusters, failed_locations = client.list_clusters()
+def test_client_instance_factory_defaults():
+ from google.cloud.bigtable.instance import Instance
- cluster_1, cluster_2, cluster_3 = clusters
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials)
- self.assertIsInstance(cluster_1, Cluster)
- self.assertEqual(cluster_1.cluster_id, cluster_id1)
- self.assertEqual(cluster_1._instance.instance_id, INSTANCE_ID1)
+ instance = client.instance(INSTANCE_ID)
- self.assertIsInstance(cluster_2, Cluster)
- self.assertEqual(cluster_2.cluster_id, cluster_id2)
- self.assertEqual(cluster_2._instance.instance_id, INSTANCE_ID2)
+ assert isinstance(instance, Instance)
+ assert instance.instance_id == INSTANCE_ID
+ assert instance.display_name == INSTANCE_ID
+ assert instance.type_ is None
+ assert instance.labels is None
+ assert instance._client is client
- self.assertIsInstance(cluster_3, Cluster)
- self.assertEqual(cluster_3.cluster_id, cluster_id3)
- self.assertEqual(cluster_3._instance.instance_id, INSTANCE_ID2)
- self.assertEqual(failed_locations, [failed_location])
+def test_client_instance_factory_non_defaults():
+ from google.cloud.bigtable.instance import Instance
+ from google.cloud.bigtable import enums
+
+ instance_type = enums.Instance.Type.DEVELOPMENT
+ labels = {"foo": "bar"}
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials)
+
+ instance = client.instance(
+ INSTANCE_ID,
+ display_name=DISPLAY_NAME,
+ instance_type=instance_type,
+ labels=labels,
+ )
+
+ assert isinstance(instance, Instance)
+ assert instance.instance_id == INSTANCE_ID
+ assert instance.display_name == DISPLAY_NAME
+ assert instance.type_ == instance_type
+ assert instance.labels == labels
+ assert instance._client is client
+
+
+def test_client_list_instances():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_instance_admin as messages_v2_pb2,
+ )
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+ from google.cloud.bigtable.instance import Instance
+
+ FAILED_LOCATION = "FAILED"
+ INSTANCE_ID1 = "instance-id1"
+ INSTANCE_ID2 = "instance-id2"
+ INSTANCE_NAME1 = "projects/" + PROJECT + "/instances/" + INSTANCE_ID1
+ INSTANCE_NAME2 = "projects/" + PROJECT + "/instances/" + INSTANCE_ID2
+
+ api = mock.create_autospec(BigtableInstanceAdminClient)
+ credentials = _make_credentials()
+
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+
+ # Create response_pb
+ response_pb = messages_v2_pb2.ListInstancesResponse(
+ failed_locations=[FAILED_LOCATION],
+ instances=[
+ data_v2_pb2.Instance(name=INSTANCE_NAME1, display_name=INSTANCE_NAME1),
+ data_v2_pb2.Instance(name=INSTANCE_NAME2, display_name=INSTANCE_NAME2),
+ ],
+ )
+
+ # Patch the stub used by the API method.
+ client._instance_admin_client = api
+ instance_stub = client._instance_admin_client
+
+ instance_stub.list_instances.side_effect = [response_pb]
+
+ # Perform the method and check the result.
+ instances, failed_locations = client.list_instances()
+
+ instance_1, instance_2 = instances
+
+ assert isinstance(instance_1, Instance)
+ assert instance_1.instance_id == INSTANCE_ID1
+ assert instance_1._client is client
+
+ assert isinstance(instance_2, Instance)
+ assert instance_2.instance_id == INSTANCE_ID2
+ assert instance_2._client is client
+
+ assert failed_locations == [FAILED_LOCATION]
+
+
+def test_client_list_clusters():
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_instance_admin as messages_v2_pb2,
+ )
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.instance import Cluster
+
+ instance_api = mock.create_autospec(BigtableInstanceAdminClient)
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+
+ INSTANCE_ID1 = "instance-id1"
+ INSTANCE_ID2 = "instance-id2"
+
+ failed_location = "FAILED"
+ cluster_id1 = "{}-cluster".format(INSTANCE_ID1)
+ cluster_id2 = "{}-cluster-1".format(INSTANCE_ID2)
+ cluster_id3 = "{}-cluster-2".format(INSTANCE_ID2)
+ cluster_name1 = client.instance_admin_client.cluster_path(
+ PROJECT, INSTANCE_ID1, cluster_id1
+ )
+ cluster_name2 = client.instance_admin_client.cluster_path(
+ PROJECT, INSTANCE_ID2, cluster_id2
+ )
+ cluster_name3 = client.instance_admin_client.cluster_path(
+ PROJECT, INSTANCE_ID2, cluster_id3
+ )
+
+ # Create response_pb
+ response_pb = messages_v2_pb2.ListClustersResponse(
+ failed_locations=[failed_location],
+ clusters=[
+ data_v2_pb2.Cluster(name=cluster_name1),
+ data_v2_pb2.Cluster(name=cluster_name2),
+ data_v2_pb2.Cluster(name=cluster_name3),
+ ],
+ )
+
+ # Patch the stub used by the API method.
+ client._instance_admin_client = instance_api
+ instance_stub = client._instance_admin_client
+
+ instance_stub.list_clusters.side_effect = [response_pb]
+
+ # Perform the method and check the result.
+ clusters, failed_locations = client.list_clusters()
+
+ cluster_1, cluster_2, cluster_3 = clusters
+
+ assert isinstance(cluster_1, Cluster)
+ assert cluster_1.cluster_id == cluster_id1
+ assert cluster_1._instance.instance_id == INSTANCE_ID1
+
+ assert isinstance(cluster_2, Cluster)
+ assert cluster_2.cluster_id == cluster_id2
+ assert cluster_2._instance.instance_id == INSTANCE_ID2
+
+ assert isinstance(cluster_3, Cluster)
+ assert cluster_3.cluster_id == cluster_id3
+ assert cluster_3._instance.instance_id == INSTANCE_ID2
+
+ assert failed_locations == [failed_location]
diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py
index 1194e53c9..74ca98830 100644
--- a/tests/unit/test_cluster.py
+++ b/tests/unit/test_cluster.py
@@ -13,552 +13,522 @@
# limitations under the License.
-import unittest
-
import mock
import pytest
from ._testing import _make_credentials
+PROJECT = "project"
+INSTANCE_ID = "instance-id"
+LOCATION_ID = "location-id"
+CLUSTER_ID = "cluster-id"
+CLUSTER_NAME = (
+ "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/clusters/" + CLUSTER_ID
+)
+LOCATION_PATH = "projects/" + PROJECT + "/locations/"
+SERVE_NODES = 5
+OP_ID = 5678
+OP_NAME = "operations/projects/{}/instances/{}/clusters/{}/operations/{}".format(
+ PROJECT, INSTANCE_ID, CLUSTER_ID, OP_ID
+)
+KEY_RING_ID = "key-ring-id"
+CRYPTO_KEY_ID = "crypto-key-id"
+KMS_KEY_NAME = f"{LOCATION_PATH}/keyRings/{KEY_RING_ID}/cryptoKeys/{CRYPTO_KEY_ID}"
+
+
+def _make_cluster(*args, **kwargs):
+ from google.cloud.bigtable.cluster import Cluster
+
+ return Cluster(*args, **kwargs)
+
+
+def _make_client(*args, **kwargs):
+ from google.cloud.bigtable.client import Client
+
+ return Client(*args, **kwargs)
+
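+# The module-level factories above replace the unittest-era
+# _get_target_class() / _make_one() indirection; imports stay inside the
+# helpers, preserving the old deferred-import behavior.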
+
+def test_cluster_constructor_defaults():
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+
+ cluster = _make_cluster(CLUSTER_ID, instance)
+
+ assert cluster.cluster_id == CLUSTER_ID
+ assert cluster._instance is instance
+ assert cluster.location_id is None
+ assert cluster.state is None
+ assert cluster.serve_nodes is None
+ assert cluster.default_storage_type is None
+ assert cluster.kms_key_name is None
+
+
+def test_cluster_constructor_explicit():
+ from google.cloud.bigtable.enums import StorageType
+ from google.cloud.bigtable.enums import Cluster
+
+ STATE = Cluster.State.READY
+ STORAGE_TYPE_SSD = StorageType.SSD
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+
+ cluster = _make_cluster(
+ CLUSTER_ID,
+ instance,
+ location_id=LOCATION_ID,
+ _state=STATE,
+ serve_nodes=SERVE_NODES,
+ default_storage_type=STORAGE_TYPE_SSD,
+ kms_key_name=KMS_KEY_NAME,
+ )
+ assert cluster.cluster_id == CLUSTER_ID
+ assert cluster._instance is instance
+ assert cluster.location_id == LOCATION_ID
+ assert cluster.state == STATE
+ assert cluster.serve_nodes == SERVE_NODES
+ assert cluster.default_storage_type == STORAGE_TYPE_SSD
+ assert cluster.kms_key_name == KMS_KEY_NAME
+
+
+def test_cluster_name():
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _Instance(INSTANCE_ID, client)
+ cluster = _make_cluster(CLUSTER_ID, instance)
+
+ assert cluster.name == CLUSTER_NAME
+
+
+def test_cluster_kms_key_name():
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+ cluster = _make_cluster(CLUSTER_ID, instance, kms_key_name=KMS_KEY_NAME)
+
+ assert cluster.kms_key_name == KMS_KEY_NAME
+
+
+def test_cluster_kms_key_name_setter():
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+ cluster = _make_cluster(CLUSTER_ID, instance, kms_key_name=KMS_KEY_NAME)
+
+ with pytest.raises(AttributeError):
+ cluster.kms_key_name = "I'm read only"
+
+
+def test_cluster_from_pb_success():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.cluster import Cluster
+ from google.cloud.bigtable import enums
+
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+
+ location = LOCATION_PATH + LOCATION_ID
+ state = enums.Cluster.State.RESIZING
+ storage_type = enums.StorageType.SSD
+ cluster_pb = data_v2_pb2.Cluster(
+ name=CLUSTER_NAME,
+ location=location,
+ state=state,
+ serve_nodes=SERVE_NODES,
+ default_storage_type=storage_type,
+ encryption_config=data_v2_pb2.Cluster.EncryptionConfig(
+ kms_key_name=KMS_KEY_NAME,
+ ),
+ )
+
+ cluster = Cluster.from_pb(cluster_pb, instance)
+ assert isinstance(cluster, Cluster)
+ assert cluster._instance == instance
+ assert cluster.cluster_id == CLUSTER_ID
+ assert cluster.location_id == LOCATION_ID
+ assert cluster.state == state
+ assert cluster.serve_nodes == SERVE_NODES
+ assert cluster.default_storage_type == storage_type
+ assert cluster.kms_key_name == KMS_KEY_NAME
+
+
+def test_cluster_from_pb_w_bad_cluster_name():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.cluster import Cluster
+
+ bad_cluster_name = "BAD_NAME"
+
+ cluster_pb = data_v2_pb2.Cluster(name=bad_cluster_name)
+
+ with pytest.raises(ValueError):
+ Cluster.from_pb(cluster_pb, None)
+
+
+def test_cluster_from_pb_w_instance_id_mismatch():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.cluster import Cluster
+
+ ALT_INSTANCE_ID = "ALT_INSTANCE_ID"
+ client = _Client(PROJECT)
+ instance = _Instance(ALT_INSTANCE_ID, client)
+
+ assert INSTANCE_ID != ALT_INSTANCE_ID
+ cluster_pb = data_v2_pb2.Cluster(name=CLUSTER_NAME)
-class TestCluster(unittest.TestCase):
+ with pytest.raises(ValueError):
+ Cluster.from_pb(cluster_pb, instance)
- PROJECT = "project"
- INSTANCE_ID = "instance-id"
- LOCATION_ID = "location-id"
- CLUSTER_ID = "cluster-id"
- LOCATION_ID = "location-id"
- CLUSTER_NAME = (
- "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/clusters/" + CLUSTER_ID
+
+def test_cluster_from_pb_w_project_mismatch():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.cluster import Cluster
+
+ ALT_PROJECT = "ALT_PROJECT"
+ client = _Client(project=ALT_PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+
+ assert PROJECT != ALT_PROJECT
+ cluster_pb = data_v2_pb2.Cluster(name=CLUSTER_NAME)
+
+ with pytest.raises(ValueError):
+ Cluster.from_pb(cluster_pb, instance)
+
+
+def test_cluster___eq__():
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+ cluster1 = _make_cluster(CLUSTER_ID, instance, LOCATION_ID)
+ cluster2 = _make_cluster(CLUSTER_ID, instance, LOCATION_ID)
+ assert cluster1 == cluster2
+
+
+def test_cluster___eq___w_type_differ():
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+ cluster1 = _make_cluster(CLUSTER_ID, instance, LOCATION_ID)
+ cluster2 = object()
+ assert cluster1 != cluster2
+
+
+def test_cluster___ne___w_same_value():
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+ cluster1 = _make_cluster(CLUSTER_ID, instance, LOCATION_ID)
+ cluster2 = _make_cluster(CLUSTER_ID, instance, LOCATION_ID)
+ assert not (cluster1 != cluster2)
+
+
+def test_cluster___ne__():
+ client = _Client(PROJECT)
+ instance = _Instance(INSTANCE_ID, client)
+ cluster1 = _make_cluster("cluster_id1", instance, LOCATION_ID)
+ cluster2 = _make_cluster("cluster_id2", instance, LOCATION_ID)
+ assert cluster1 != cluster2
+
+
+def _make_instance_admin_client():
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+
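+ # autospec validates every stubbed call against the real admin client's
+ # method signatures, so signature drift fails these tests loudly.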
+ return mock.create_autospec(BigtableInstanceAdminClient)
+
+
+def test_cluster_reload():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.enums import StorageType
+ from google.cloud.bigtable.enums import Cluster
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ STORAGE_TYPE_SSD = StorageType.SSD
+ instance = _Instance(INSTANCE_ID, client)
+ cluster = _make_cluster(
+ CLUSTER_ID,
+ instance,
+ location_id=LOCATION_ID,
+ serve_nodes=SERVE_NODES,
+ default_storage_type=STORAGE_TYPE_SSD,
+ kms_key_name=KMS_KEY_NAME,
)
- LOCATION_PATH = "projects/" + PROJECT + "/locations/"
- SERVE_NODES = 5
- OP_ID = 5678
- OP_NAME = "operations/projects/{}/instances/{}/clusters/{}/operations/{}".format(
- PROJECT, INSTANCE_ID, CLUSTER_ID, OP_ID
+
+ # Create response_pb
+ LOCATION_ID_FROM_SERVER = "new-location-id"
+ STATE = Cluster.State.READY
+ SERVE_NODES_FROM_SERVER = 10
+ STORAGE_TYPE_FROM_SERVER = StorageType.HDD
+
+ response_pb = data_v2_pb2.Cluster(
+ name=cluster.name,
+ location=LOCATION_PATH + LOCATION_ID_FROM_SERVER,
+ state=STATE,
+ serve_nodes=SERVE_NODES_FROM_SERVER,
+ default_storage_type=STORAGE_TYPE_FROM_SERVER,
)
- KEY_RING_ID = "key-ring-id"
- CRYPTO_KEY_ID = "crypto-key-id"
- KMS_KEY_NAME = f"{LOCATION_PATH}/keyRings/{KEY_RING_ID}/cryptoKeys/{CRYPTO_KEY_ID}"
-
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.cluster import Cluster
-
- return Cluster
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- @staticmethod
- def _get_target_client_class():
- from google.cloud.bigtable.client import Client
-
- return Client
-
- def _make_client(self, *args, **kwargs):
- return self._get_target_client_class()(*args, **kwargs)
-
- def test_constructor_defaults(self):
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
-
- cluster = self._make_one(self.CLUSTER_ID, instance)
- self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
- self.assertIs(cluster._instance, instance)
- self.assertIsNone(cluster.location_id)
- self.assertIsNone(cluster.state)
- self.assertIsNone(cluster.serve_nodes)
- self.assertIsNone(cluster.default_storage_type)
- self.assertIsNone(cluster.kms_key_name)
-
- def test_constructor_non_default(self):
- from google.cloud.bigtable.enums import StorageType
- from google.cloud.bigtable.enums import Cluster
-
- STATE = Cluster.State.READY
- STORAGE_TYPE_SSD = StorageType.SSD
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
-
- cluster = self._make_one(
- self.CLUSTER_ID,
- instance,
- location_id=self.LOCATION_ID,
- _state=STATE,
- serve_nodes=self.SERVE_NODES,
- default_storage_type=STORAGE_TYPE_SSD,
- kms_key_name=self.KMS_KEY_NAME,
- )
- self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
- self.assertIs(cluster._instance, instance)
- self.assertEqual(cluster.location_id, self.LOCATION_ID)
- self.assertEqual(cluster.state, STATE)
- self.assertEqual(cluster.serve_nodes, self.SERVE_NODES)
- self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD)
- self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME)
-
- def test_name_property(self):
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = _Instance(self.INSTANCE_ID, client)
- cluster = self._make_one(self.CLUSTER_ID, instance)
-
- self.assertEqual(cluster.name, self.CLUSTER_NAME)
-
- def test_kms_key_name_property(self):
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
-
- cluster = self._make_one(
- self.CLUSTER_ID, instance, kms_key_name=self.KMS_KEY_NAME
- )
-
- self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME)
- with pytest.raises(AttributeError):
- cluster.kms_key_name = "I'm read only"
-
- def test_from_pb_success(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable import enums
-
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
-
- location = self.LOCATION_PATH + self.LOCATION_ID
- state = enums.Cluster.State.RESIZING
- storage_type = enums.StorageType.SSD
- cluster_pb = data_v2_pb2.Cluster(
- name=self.CLUSTER_NAME,
- location=location,
- state=state,
- serve_nodes=self.SERVE_NODES,
- default_storage_type=storage_type,
- encryption_config=data_v2_pb2.Cluster.EncryptionConfig(
- kms_key_name=self.KMS_KEY_NAME,
- ),
- )
-
- klass = self._get_target_class()
- cluster = klass.from_pb(cluster_pb, instance)
- self.assertIsInstance(cluster, klass)
- self.assertEqual(cluster._instance, instance)
- self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
- self.assertEqual(cluster.location_id, self.LOCATION_ID)
- self.assertEqual(cluster.state, state)
- self.assertEqual(cluster.serve_nodes, self.SERVE_NODES)
- self.assertEqual(cluster.default_storage_type, storage_type)
- self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME)
-
- def test_from_pb_bad_cluster_name(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
-
- bad_cluster_name = "BAD_NAME"
-
- cluster_pb = data_v2_pb2.Cluster(name=bad_cluster_name)
-
- klass = self._get_target_class()
- with self.assertRaises(ValueError):
- klass.from_pb(cluster_pb, None)
-
- def test_from_pb_instance_id_mistmatch(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
-
- ALT_INSTANCE_ID = "ALT_INSTANCE_ID"
- client = _Client(self.PROJECT)
- instance = _Instance(ALT_INSTANCE_ID, client)
-
- self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID)
- cluster_pb = data_v2_pb2.Cluster(name=self.CLUSTER_NAME)
-
- klass = self._get_target_class()
- with self.assertRaises(ValueError):
- klass.from_pb(cluster_pb, instance)
-
- def test_from_pb_project_mistmatch(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
-
- ALT_PROJECT = "ALT_PROJECT"
- client = _Client(project=ALT_PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
-
- self.assertNotEqual(self.PROJECT, ALT_PROJECT)
- cluster_pb = data_v2_pb2.Cluster(name=self.CLUSTER_NAME)
-
- klass = self._get_target_class()
- with self.assertRaises(ValueError):
- klass.from_pb(cluster_pb, instance)
-
- def test___eq__(self):
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
- cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
- cluster2 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
- self.assertEqual(cluster1, cluster2)
-
- def test___eq__type_differ(self):
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
- cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
- cluster2 = object()
- self.assertNotEqual(cluster1, cluster2)
-
- def test___ne__same_value(self):
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
- cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
- cluster2 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
- comparison_val = cluster1 != cluster2
- self.assertFalse(comparison_val)
-
- def test___ne__(self):
- client = _Client(self.PROJECT)
- instance = _Instance(self.INSTANCE_ID, client)
- cluster1 = self._make_one("cluster_id1", instance, self.LOCATION_ID)
- cluster2 = self._make_one("cluster_id2", instance, self.LOCATION_ID)
- self.assertNotEqual(cluster1, cluster2)
-
- def test_reload(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable.enums import StorageType
- from google.cloud.bigtable.enums import Cluster
-
- api = mock.create_autospec(BigtableInstanceAdminClient)
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- STORAGE_TYPE_SSD = StorageType.SSD
- instance = _Instance(self.INSTANCE_ID, client)
- cluster = self._make_one(
- self.CLUSTER_ID,
- instance,
- location_id=self.LOCATION_ID,
- serve_nodes=self.SERVE_NODES,
- default_storage_type=STORAGE_TYPE_SSD,
- kms_key_name=self.KMS_KEY_NAME,
- )
-
- # Create response_pb
- LOCATION_ID_FROM_SERVER = "new-location-id"
- STATE = Cluster.State.READY
- SERVE_NODES_FROM_SERVER = 10
- STORAGE_TYPE_FROM_SERVER = StorageType.HDD
-
- response_pb = data_v2_pb2.Cluster(
- name=cluster.name,
- location=self.LOCATION_PATH + LOCATION_ID_FROM_SERVER,
- state=STATE,
- serve_nodes=SERVE_NODES_FROM_SERVER,
- default_storage_type=STORAGE_TYPE_FROM_SERVER,
- )
-
- # Patch the stub used by the API method.
- client._instance_admin_client = api
- instance_stub = client._instance_admin_client
-
- instance_stub.get_cluster.side_effect = [response_pb]
-
- # Create expected_result.
- expected_result = None # reload() has no return value.
-
- # Check Cluster optional config values before.
- self.assertEqual(cluster.location_id, self.LOCATION_ID)
- self.assertIsNone(cluster.state)
- self.assertEqual(cluster.serve_nodes, self.SERVE_NODES)
- self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD)
-
- # Perform the method and check the result.
- result = cluster.reload()
- self.assertEqual(result, expected_result)
- self.assertEqual(cluster.location_id, LOCATION_ID_FROM_SERVER)
- self.assertEqual(cluster.state, STATE)
- self.assertEqual(cluster.serve_nodes, SERVE_NODES_FROM_SERVER)
- self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_FROM_SERVER)
- self.assertEqual(cluster.kms_key_name, None)
-
- def test_exists(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable.instance import Instance
- from google.api_core import exceptions
-
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = Instance(self.INSTANCE_ID, client)
-
- # Create response_pb
- cluster_name = client.instance_admin_client.cluster_path(
- self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID
- )
- response_pb = data_v2_pb2.Cluster(name=cluster_name)
-
- # Patch the stub used by the API method.
- client._instance_admin_client = instance_api
- bigtable_instance_stub = client._instance_admin_client
-
- bigtable_instance_stub.get_cluster.side_effect = [
- response_pb,
- exceptions.NotFound("testing"),
- exceptions.BadRequest("testing"),
- ]
-
- # Perform the method and check the result.
- non_existing_cluster_id = "cluster-id-2"
- alt_cluster_1 = self._make_one(self.CLUSTER_ID, instance)
- alt_cluster_2 = self._make_one(non_existing_cluster_id, instance)
- self.assertTrue(alt_cluster_1.exists())
- self.assertFalse(alt_cluster_2.exists())
- with self.assertRaises(exceptions.BadRequest):
- alt_cluster_1.exists()
-
- def test_create(self):
- import datetime
- from google.longrunning import operations_pb2
- from google.protobuf.any_pb2 import Any
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_instance_admin as messages_v2_pb2,
- )
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable.instance import Instance
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2
- from google.cloud.bigtable.enums import StorageType
-
- NOW = datetime.datetime.utcnow()
- NOW_PB = _datetime_to_pb_timestamp(NOW)
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- STORAGE_TYPE_SSD = StorageType.SSD
- LOCATION = self.LOCATION_PATH + self.LOCATION_ID
- instance = Instance(self.INSTANCE_ID, client)
- cluster = self._make_one(
- self.CLUSTER_ID,
- instance,
- location_id=self.LOCATION_ID,
- serve_nodes=self.SERVE_NODES,
- default_storage_type=STORAGE_TYPE_SSD,
- )
- expected_request_cluster = instance_v2_pb2.Cluster(
- location=LOCATION,
- serve_nodes=cluster.serve_nodes,
- default_storage_type=cluster.default_storage_type,
- )
- expected_request = {
- "request": {
- "parent": instance.name,
- "cluster_id": self.CLUSTER_ID,
- "cluster": expected_request_cluster,
- }
- }
- name = instance.name
- metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB)
- type_url = "type.googleapis.com/{}".format(
- messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name
- )
- response_pb = operations_pb2.Operation(
- name=self.OP_NAME,
- metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
- )
-
- # Patch the stub used by the API method.
- api = mock.create_autospec(BigtableInstanceAdminClient)
- api.common_location_path.return_value = LOCATION
- client._instance_admin_client = api
- cluster._instance._client = client
- cluster._instance._client.instance_admin_client.instance_path.return_value = (
- name
- )
- client._instance_admin_client.create_cluster.return_value = response_pb
- # Perform the method and check the result.
- cluster.create()
-
- actual_request = client._instance_admin_client.create_cluster.call_args_list[
- 0
- ].kwargs
- self.assertEqual(actual_request, expected_request)
-
- def test_create_w_cmek(self):
- import datetime
- from google.longrunning import operations_pb2
- from google.protobuf.any_pb2 import Any
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_instance_admin as messages_v2_pb2,
- )
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable.instance import Instance
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2
- from google.cloud.bigtable.enums import StorageType
-
- NOW = datetime.datetime.utcnow()
- NOW_PB = _datetime_to_pb_timestamp(NOW)
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- STORAGE_TYPE_SSD = StorageType.SSD
- LOCATION = self.LOCATION_PATH + self.LOCATION_ID
- instance = Instance(self.INSTANCE_ID, client)
- cluster = self._make_one(
- self.CLUSTER_ID,
- instance,
- location_id=self.LOCATION_ID,
- serve_nodes=self.SERVE_NODES,
- default_storage_type=STORAGE_TYPE_SSD,
- kms_key_name=self.KMS_KEY_NAME,
- )
- expected_request_cluster = instance_v2_pb2.Cluster(
- location=LOCATION,
- serve_nodes=cluster.serve_nodes,
- default_storage_type=cluster.default_storage_type,
- encryption_config=instance_v2_pb2.Cluster.EncryptionConfig(
- kms_key_name=self.KMS_KEY_NAME,
- ),
- )
- expected_request = {
- "request": {
- "parent": instance.name,
- "cluster_id": self.CLUSTER_ID,
- "cluster": expected_request_cluster,
- }
- }
- name = instance.name
- metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB)
- type_url = "type.googleapis.com/{}".format(
- messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name
- )
- response_pb = operations_pb2.Operation(
- name=self.OP_NAME,
- metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
- )
-
- # Patch the stub used by the API method.
- api = mock.create_autospec(BigtableInstanceAdminClient)
- api.common_location_path.return_value = LOCATION
- client._instance_admin_client = api
- cluster._instance._client = client
- cluster._instance._client.instance_admin_client.instance_path.return_value = (
- name
- )
- client._instance_admin_client.create_cluster.return_value = response_pb
- # Perform the method and check the result.
- cluster.create()
-
- actual_request = client._instance_admin_client.create_cluster.call_args_list[
- 0
- ].kwargs
- self.assertEqual(actual_request, expected_request)
-
- def test_update(self):
- import datetime
- from google.longrunning import operations_pb2
- from google.protobuf.any_pb2 import Any
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_instance_admin as messages_v2_pb2,
- )
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable.enums import StorageType
-
- NOW = datetime.datetime.utcnow()
- NOW_PB = _datetime_to_pb_timestamp(NOW)
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- STORAGE_TYPE_SSD = StorageType.SSD
- instance = _Instance(self.INSTANCE_ID, client)
- cluster = self._make_one(
- self.CLUSTER_ID,
- instance,
- location_id=self.LOCATION_ID,
- serve_nodes=self.SERVE_NODES,
- default_storage_type=STORAGE_TYPE_SSD,
- )
- # Create expected_request
- expected_request = {
- "request": {
- "name": "projects/project/instances/instance-id/clusters/cluster-id",
- "serve_nodes": 5,
- "location": None,
- }
- }
- metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB)
- type_url = "type.googleapis.com/{}".format(
- messages_v2_pb2.UpdateClusterMetadata._meta._pb.DESCRIPTOR.full_name
- )
- response_pb = operations_pb2.Operation(
- name=self.OP_NAME,
- metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
- )
-
- # Patch the stub used by the API method.
- api = mock.create_autospec(BigtableInstanceAdminClient)
- client._instance_admin_client = api
- cluster._instance._client.instance_admin_client.cluster_path.return_value = (
- "projects/project/instances/instance-id/clusters/cluster-id"
- )
- # Perform the method and check the result.
- client._instance_admin_client.update_cluster.return_value = response_pb
- cluster.update()
-
- actual_request = client._instance_admin_client.update_cluster.call_args_list[
- 0
- ].kwargs
-
- self.assertEqual(actual_request, expected_request)
-
- def test_delete(self):
- from google.protobuf import empty_pb2
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
-
- api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = _Instance(self.INSTANCE_ID, client)
- cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID)
-
- # Create response_pb
- response_pb = empty_pb2.Empty()
-
- # Patch the stub used by the API method.
- client._instance_admin_client = api
- instance_admin_client = client._instance_admin_client
- instance_stub = instance_admin_client
- instance_stub.delete_cluster.side_effect = [response_pb]
-
- # Create expected_result.
- expected_result = None # delete() has no return value.
-
- # Perform the method and check the result.
- result = cluster.delete()
-
- self.assertEqual(result, expected_result)
+
+ # Patch the stub used by the API method.
+ api = client._instance_admin_client = _make_instance_admin_client()
+ api.get_cluster.side_effect = [response_pb]
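+ # A one-item side_effect list also guards the call count: a second,
+ # unexpected get_cluster() call would raise StopIteration.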
+
+ # Create expected_result.
+ expected_result = None # reload() has no return value.
+
+ # Check Cluster optional config values before.
+ assert cluster.location_id == LOCATION_ID
+ assert cluster.state is None
+ assert cluster.serve_nodes == SERVE_NODES
+ assert cluster.default_storage_type == STORAGE_TYPE_SSD
+
+ # Perform the method and check the result.
+ result = cluster.reload()
+ assert result == expected_result
+ assert cluster.location_id == LOCATION_ID_FROM_SERVER
+ assert cluster.state == STATE
+ assert cluster.serve_nodes == SERVE_NODES_FROM_SERVER
+ assert cluster.default_storage_type == STORAGE_TYPE_FROM_SERVER
+ assert cluster.kms_key_name is None
+
+ api.get_cluster.assert_called_once_with(request={"name": cluster.name})
+
+
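+# The old monolithic test_exists() is split into hit / miss / error cases,
+# one mocked get_cluster() outcome per test.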
+def test_cluster_exists_hit():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.instance import Instance
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = Instance(INSTANCE_ID, client)
+
+ cluster_name = client.instance_admin_client.cluster_path(
+ PROJECT, INSTANCE_ID, CLUSTER_ID
+ )
+ response_pb = data_v2_pb2.Cluster(name=cluster_name)
+
+ api = client._instance_admin_client = _make_instance_admin_client()
+ api.get_cluster.return_value = response_pb
+
+ cluster = _make_cluster(CLUSTER_ID, instance)
+
+ assert cluster.exists()
+
+ api.get_cluster.assert_called_once_with(request={"name": cluster.name})
+
+
+def test_cluster_exists_miss():
+ from google.cloud.bigtable.instance import Instance
+ from google.api_core import exceptions
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = Instance(INSTANCE_ID, client)
+
+ api = client._instance_admin_client = _make_instance_admin_client()
+ api.get_cluster.side_effect = exceptions.NotFound("testing")
+
+ non_existing_cluster_id = "nonesuch-cluster-2"
+ cluster = _make_cluster(non_existing_cluster_id, instance)
+
+ assert not cluster.exists()
+
+ api.get_cluster.assert_called_once_with(request={"name": cluster.name})
+
+
+def test_cluster_exists_w_error():
+ from google.cloud.bigtable.instance import Instance
+ from google.api_core import exceptions
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = Instance(INSTANCE_ID, client)
+
+ api = client._instance_admin_client = _make_instance_admin_client()
+ api.get_cluster.side_effect = exceptions.BadRequest("testing")
+
+ cluster = _make_cluster(CLUSTER_ID, instance)
+
+ with pytest.raises(exceptions.BadRequest):
+ cluster.exists()
+
+ api.get_cluster.assert_called_once_with(request={"name": cluster.name})
+
+
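+# create() kicks off a long-running operation; the tests below fake the
+# admin API's Operation response by packing CreateClusterMetadata into a
+# protobuf Any keyed by its type.googleapis.com type URL.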
+def test_cluster_create():
+ import datetime
+ from google.longrunning import operations_pb2
+ from google.protobuf.any_pb2 import Any
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_instance_admin as messages_v2_pb2,
+ )
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable.instance import Instance
+ from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2
+ from google.cloud.bigtable.enums import StorageType
+
+ NOW = datetime.datetime.utcnow()
+ NOW_PB = _datetime_to_pb_timestamp(NOW)
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ STORAGE_TYPE_SSD = StorageType.SSD
+ LOCATION = LOCATION_PATH + LOCATION_ID
+ instance = Instance(INSTANCE_ID, client)
+ cluster = _make_cluster(
+ CLUSTER_ID,
+ instance,
+ location_id=LOCATION_ID,
+ serve_nodes=SERVE_NODES,
+ default_storage_type=STORAGE_TYPE_SSD,
+ )
+ metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB)
+ type_url = "type.googleapis.com/{}".format(
+ messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name
+ )
+ response_pb = operations_pb2.Operation(
+ name=OP_NAME,
+ metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
+ )
+
+ api = client._instance_admin_client = _make_instance_admin_client()
+ api.common_location_path.return_value = LOCATION
+ api.instance_path.return_value = instance.name
+ api.create_cluster.return_value = response_pb
+
+ cluster.create()
+
+ expected_request_cluster = instance_v2_pb2.Cluster(
+ location=LOCATION,
+ serve_nodes=cluster.serve_nodes,
+ default_storage_type=cluster.default_storage_type,
+ )
+ expected_request = {
+ "parent": instance.name,
+ "cluster_id": CLUSTER_ID,
+ "cluster": expected_request_cluster,
+ }
+ api.create_cluster.assert_called_once_with(request=expected_request)
+
+
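+# Same flow as test_cluster_create, but with a CMEK key: the request
+# cluster must carry an EncryptionConfig holding the KMS key name.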
+def test_cluster_create_w_cmek():
+ import datetime
+ from google.longrunning import operations_pb2
+ from google.protobuf.any_pb2 import Any
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_instance_admin as messages_v2_pb2,
+ )
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable.instance import Instance
+ from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2
+ from google.cloud.bigtable.enums import StorageType
+
+ NOW = datetime.datetime.utcnow()
+ NOW_PB = _datetime_to_pb_timestamp(NOW)
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ STORAGE_TYPE_SSD = StorageType.SSD
+ LOCATION = LOCATION_PATH + LOCATION_ID
+ instance = Instance(INSTANCE_ID, client)
+ cluster = _make_cluster(
+ CLUSTER_ID,
+ instance,
+ location_id=LOCATION_ID,
+ serve_nodes=SERVE_NODES,
+ default_storage_type=STORAGE_TYPE_SSD,
+ kms_key_name=KMS_KEY_NAME,
+ )
+ name = instance.name
+ metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB)
+ type_url = "type.googleapis.com/{}".format(
+ messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name
+ )
+ response_pb = operations_pb2.Operation(
+ name=OP_NAME,
+ metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
+ )
+
+ api = client._instance_admin_client = _make_instance_admin_client()
+ api.common_location_path.return_value = LOCATION
+ api.instance_path.return_value = name
+ api.create_cluster.return_value = response_pb
+
+ cluster.create()
+
+ expected_request_cluster = instance_v2_pb2.Cluster(
+ location=LOCATION,
+ serve_nodes=cluster.serve_nodes,
+ default_storage_type=cluster.default_storage_type,
+ encryption_config=instance_v2_pb2.Cluster.EncryptionConfig(
+ kms_key_name=KMS_KEY_NAME,
+ ),
+ )
+ expected_request = {
+ "parent": instance.name,
+ "cluster_id": CLUSTER_ID,
+ "cluster": expected_request_cluster,
+ }
+ api.create_cluster.assert_called_once_with(request=expected_request)
+
+
+def test_cluster_update():
+ import datetime
+ from google.longrunning import operations_pb2
+ from google.protobuf.any_pb2 import Any
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_instance_admin as messages_v2_pb2,
+ )
+ from google.cloud.bigtable.enums import StorageType
+
+ NOW = datetime.datetime.utcnow()
+ NOW_PB = _datetime_to_pb_timestamp(NOW)
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ STORAGE_TYPE_SSD = StorageType.SSD
+ instance = _Instance(INSTANCE_ID, client)
+ cluster = _make_cluster(
+ CLUSTER_ID,
+ instance,
+ location_id=LOCATION_ID,
+ serve_nodes=SERVE_NODES,
+ default_storage_type=STORAGE_TYPE_SSD,
+ )
+ metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB)
+ type_url = "type.googleapis.com/{}".format(
+ messages_v2_pb2.UpdateClusterMetadata._meta._pb.DESCRIPTOR.full_name
+ )
+ response_pb = operations_pb2.Operation(
+ name=OP_NAME,
+ metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
+ )
+
+ api = client._instance_admin_client = _make_instance_admin_client()
+ api.cluster_path.return_value = (
+ "projects/project/instances/instance-id/clusters/cluster-id"
+ )
+ api.update_cluster.return_value = response_pb
+
+ cluster.update()
+
+ expected_request = {
+ "name": "projects/project/instances/instance-id/clusters/cluster-id",
+ "serve_nodes": 5,
+ "location": None,
+ }
+ api.update_cluster.assert_called_once_with(request=expected_request)
+
+
+def test_cluster_delete():
+ from google.protobuf import empty_pb2
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _Instance(INSTANCE_ID, client)
+ cluster = _make_cluster(CLUSTER_ID, instance, LOCATION_ID)
+
+ api = client._instance_admin_client = _make_instance_admin_client()
+ api.delete_cluster.side_effect = [empty_pb2.Empty()]
+
+ # Perform the method and check the result.
+ assert cluster.delete() is None
+
+ api.delete_cluster.assert_called_once_with(request={"name": cluster.name})
class _Instance(object):
diff --git a/tests/unit/test_column_family.py b/tests/unit/test_column_family.py
index 601c37cf5..9d4632e2a 100644
--- a/tests/unit/test_column_family.py
+++ b/tests/unit/test_column_family.py
@@ -12,609 +12,607 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import unittest
-
import mock
+import pytest
from ._testing import _make_credentials
-class TestMaxVersionsGCRule(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
-
- return MaxVersionsGCRule
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test___eq__type_differ(self):
- gc_rule1 = self._make_one(10)
- self.assertNotEqual(gc_rule1, object())
- self.assertEqual(gc_rule1, mock.ANY)
-
- def test___eq__same_value(self):
- gc_rule1 = self._make_one(2)
- gc_rule2 = self._make_one(2)
- self.assertEqual(gc_rule1, gc_rule2)
-
- def test___ne__same_value(self):
- gc_rule1 = self._make_one(99)
- gc_rule2 = self._make_one(99)
- comparison_val = gc_rule1 != gc_rule2
- self.assertFalse(comparison_val)
-
- def test_to_pb(self):
- max_num_versions = 1337
- gc_rule = self._make_one(max_num_versions=max_num_versions)
- pb_val = gc_rule.to_pb()
- expected = _GcRulePB(max_num_versions=max_num_versions)
- self.assertEqual(pb_val, expected)
-
-
-class TestMaxAgeGCRule(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.column_family import MaxAgeGCRule
-
- return MaxAgeGCRule
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test___eq__type_differ(self):
- max_age = object()
- gc_rule1 = self._make_one(max_age=max_age)
- gc_rule2 = object()
- self.assertNotEqual(gc_rule1, gc_rule2)
-
- def test___eq__same_value(self):
- max_age = object()
- gc_rule1 = self._make_one(max_age=max_age)
- gc_rule2 = self._make_one(max_age=max_age)
- self.assertEqual(gc_rule1, gc_rule2)
+def _make_max_versions_gc_rule(*args, **kwargs):
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+ return MaxVersionsGCRule(*args, **kwargs)
+
+
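+# Factory helpers such as ``_make_max_versions_gc_rule`` replace the old
+# ``_get_target_class``/``_make_one`` pair; the import stays inside the
+# helper so an import failure is charged to the tests that use it.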
+def test_max_versions_gc_rule___eq__type_differ():
+ gc_rule1 = _make_max_versions_gc_rule(10)
+ assert gc_rule1 != object()
+ assert gc_rule1 == mock.ANY
+
+
+def test_max_versions_gc_rule___eq__same_value():
+ gc_rule1 = _make_max_versions_gc_rule(2)
+ gc_rule2 = _make_max_versions_gc_rule(2)
+ assert gc_rule1 == gc_rule2
+
+
+def test_max_versions_gc_rule___ne__same_value():
+ gc_rule1 = _make_max_versions_gc_rule(99)
+ gc_rule2 = _make_max_versions_gc_rule(99)
+ assert not (gc_rule1 != gc_rule2)
+
+
+def test_max_versions_gc_rule_to_pb():
+ max_num_versions = 1337
+ gc_rule = _make_max_versions_gc_rule(max_num_versions=max_num_versions)
+ pb_val = gc_rule.to_pb()
+ expected = _GcRulePB(max_num_versions=max_num_versions)
+ assert pb_val == expected
+
+
+def _make_max_age_gc_rule(*args, **kwargs):
+ from google.cloud.bigtable.column_family import MaxAgeGCRule
+
+ return MaxAgeGCRule(*args, **kwargs)
+
+
+def test_max_age_gc_rule___eq__type_differ():
+ max_age = object()
+ gc_rule1 = _make_max_age_gc_rule(max_age=max_age)
+ gc_rule2 = object()
+ assert gc_rule1 != gc_rule2
+
+
+def test_max_age_gc_rule___eq__same_value():
+ max_age = object()
+ gc_rule1 = _make_max_age_gc_rule(max_age=max_age)
+ gc_rule2 = _make_max_age_gc_rule(max_age=max_age)
+ assert gc_rule1 == gc_rule2
+
+
+def test_max_age_gc_rule___ne__same_value():
+ max_age = object()
+ gc_rule1 = _make_max_age_gc_rule(max_age=max_age)
+ gc_rule2 = _make_max_age_gc_rule(max_age=max_age)
+ assert not (gc_rule1 != gc_rule2)
+
+
+def test_max_age_gc_rule_to_pb():
+ import datetime
+ from google.protobuf import duration_pb2
+
+ max_age = datetime.timedelta(seconds=1)
+ duration = duration_pb2.Duration(seconds=1)
+ gc_rule = _make_max_age_gc_rule(max_age=max_age)
+ pb_val = gc_rule.to_pb()
+ assert pb_val == _GcRulePB(max_age=duration)
+
+
+def _make_gc_rule_union(*args, **kwargs):
+ from google.cloud.bigtable.column_family import GCRuleUnion
+
+ return GCRuleUnion(*args, **kwargs)
+
+
+def test_gc_rule_union_constructor():
+ rules = object()
+ rule_union = _make_gc_rule_union(rules)
+ assert rule_union.rules is rules
+
+
+def test_gc_rule_union___eq__():
+ rules = object()
+ gc_rule1 = _make_gc_rule_union(rules)
+ gc_rule2 = _make_gc_rule_union(rules)
+ assert gc_rule1 == gc_rule2
+
+
+def test_gc_rule_union___eq__type_differ():
+ rules = object()
+ gc_rule1 = _make_gc_rule_union(rules)
+ gc_rule2 = object()
+ assert gc_rule1 != gc_rule2
+
+
+def test_gc_rule_union___ne__same_value():
+ rules = object()
+ gc_rule1 = _make_gc_rule_union(rules)
+ gc_rule2 = _make_gc_rule_union(rules)
+ assert not (gc_rule1 != gc_rule2)
+
+
+def test_gc_rule_union_to_pb():
+ import datetime
+ from google.protobuf import duration_pb2
+ from google.cloud.bigtable.column_family import MaxAgeGCRule
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+ max_num_versions = 42
+ rule1 = MaxVersionsGCRule(max_num_versions)
+ pb_rule1 = _GcRulePB(max_num_versions=max_num_versions)
+
+ max_age = datetime.timedelta(seconds=1)
+ rule2 = MaxAgeGCRule(max_age)
+ pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1))
+
+ rule3 = _make_gc_rule_union(rules=[rule1, rule2])
+ pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2]))
+
+ gc_rule_pb = rule3.to_pb()
+ assert gc_rule_pb == pb_rule3
+
+
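+# Unions can nest: the next test wraps one union inside another and checks
+# that the serialized rule tree nests the same way.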
+def test_gc_rule_union_to_pb_nested():
+ import datetime
+ from google.protobuf import duration_pb2
+ from google.cloud.bigtable.column_family import MaxAgeGCRule
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+ max_num_versions1 = 42
+ rule1 = MaxVersionsGCRule(max_num_versions1)
+ pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1)
+
+ max_age = datetime.timedelta(seconds=1)
+ rule2 = MaxAgeGCRule(max_age)
+ pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1))
+
+ rule3 = _make_gc_rule_union(rules=[rule1, rule2])
+ pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2]))
+
+ max_num_versions2 = 1337
+ rule4 = MaxVersionsGCRule(max_num_versions2)
+ pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2)
+
+ rule5 = _make_gc_rule_union(rules=[rule3, rule4])
+ pb_rule5 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4]))
+
+ gc_rule_pb = rule5.to_pb()
+ assert gc_rule_pb == pb_rule5
+
+
+def _make_gc_rule_intersection(*args, **kwargs):
+ from google.cloud.bigtable.column_family import GCRuleIntersection
+
+ return GCRuleIntersection(*args, **kwargs)
+
+
+def test_gc_rule_intersection_constructor():
+ rules = object()
+ rule_intersection = _make_gc_rule_intersection(rules)
+ assert rule_intersection.rules is rules
+
+
+def test_gc_rule_intersection___eq__():
+ rules = object()
+ gc_rule1 = _make_gc_rule_intersection(rules)
+ gc_rule2 = _make_gc_rule_intersection(rules)
+ assert gc_rule1 == gc_rule2
+
+
+def test_gc_rule_intersection___eq__type_differ():
+ rules = object()
+ gc_rule1 = _make_gc_rule_intersection(rules)
+ gc_rule2 = object()
+ assert gc_rule1 != gc_rule2
+
+
+def test_gc_rule_intersection___ne__same_value():
+ rules = object()
+ gc_rule1 = _make_gc_rule_intersection(rules)
+ gc_rule2 = _make_gc_rule_intersection(rules)
+ assert not (gc_rule1 != gc_rule2)
+
+
+def test_gc_rule_intersection_to_pb():
+ import datetime
+ from google.protobuf import duration_pb2
+ from google.cloud.bigtable.column_family import MaxAgeGCRule
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+ max_num_versions = 42
+ rule1 = MaxVersionsGCRule(max_num_versions)
+ pb_rule1 = _GcRulePB(max_num_versions=max_num_versions)
+
+ max_age = datetime.timedelta(seconds=1)
+ rule2 = MaxAgeGCRule(max_age)
+ pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1))
+
+ rule3 = _make_gc_rule_intersection(rules=[rule1, rule2])
+ pb_rule3 = _GcRulePB(intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2]))
+
+ gc_rule_pb = rule3.to_pb()
+ assert gc_rule_pb == pb_rule3
+
+
+def test_gc_rule_intersection_to_pb_nested():
+ import datetime
+ from google.protobuf import duration_pb2
+ from google.cloud.bigtable.column_family import MaxAgeGCRule
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+ max_num_versions1 = 42
+ rule1 = MaxVersionsGCRule(max_num_versions1)
+ pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1)
+
+ max_age = datetime.timedelta(seconds=1)
+ rule2 = MaxAgeGCRule(max_age)
+ pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1))
+
+ rule3 = _make_gc_rule_intersection(rules=[rule1, rule2])
+ pb_rule3 = _GcRulePB(intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2]))
+
+ max_num_versions2 = 1337
+ rule4 = MaxVersionsGCRule(max_num_versions2)
+ pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2)
+
+ rule5 = _make_gc_rule_intersection(rules=[rule3, rule4])
+ pb_rule5 = _GcRulePB(intersection=_GcRuleIntersectionPB(rules=[pb_rule3, pb_rule4]))
+
+ gc_rule_pb = rule5.to_pb()
+ assert gc_rule_pb == pb_rule5
+
+
+def _make_column_family(*args, **kwargs):
+ from google.cloud.bigtable.column_family import ColumnFamily
+
+ return ColumnFamily(*args, **kwargs)
+
+
+def _make_client(*args, **kwargs):
+ from google.cloud.bigtable.client import Client
+
+ return Client(*args, **kwargs)
+
+
+def test_column_family_constructor():
+ column_family_id = u"column-family-id"
+ table = object()
+ gc_rule = object()
+ column_family = _make_column_family(column_family_id, table, gc_rule=gc_rule)
+
+ assert column_family.column_family_id == column_family_id
+ assert column_family._table is table
+ assert column_family.gc_rule is gc_rule
+
+
+def test_column_family_name_property():
+ column_family_id = u"column-family-id"
+ table_name = "table_name"
+ table = _Table(table_name)
+ column_family = _make_column_family(column_family_id, table)
+
+ expected_name = table_name + "/columnFamilies/" + column_family_id
+ assert column_family.name == expected_name
+
+
+def test_column_family___eq__():
+ column_family_id = "column_family_id"
+ table = object()
+ gc_rule = object()
+ column_family1 = _make_column_family(column_family_id, table, gc_rule=gc_rule)
+ column_family2 = _make_column_family(column_family_id, table, gc_rule=gc_rule)
+ assert column_family1 == column_family2
+
+
+def test_column_family___eq__type_differ():
+ column_family1 = _make_column_family("column_family_id", None)
+ column_family2 = object()
+ assert column_family1 != column_family2
+
+
+def test_column_family___ne__same_value():
+ column_family_id = "column_family_id"
+ table = object()
+ gc_rule = object()
+ column_family1 = _make_column_family(column_family_id, table, gc_rule=gc_rule)
+ column_family2 = _make_column_family(column_family_id, table, gc_rule=gc_rule)
+ assert not (column_family1 != column_family2)
+
+
+def test_column_family___ne__():
+ column_family1 = _make_column_family("column_family_id1", None)
+ column_family2 = _make_column_family("column_family_id2", None)
+ assert column_family1 != column_family2
+
+
+def test_column_family_to_pb_no_rules():
+ column_family = _make_column_family("column_family_id", None)
+ pb_val = column_family.to_pb()
+ expected = _ColumnFamilyPB()
+ assert pb_val == expected
+
+
+def test_column_family_to_pb_with_rule():
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+ gc_rule = MaxVersionsGCRule(1)
+ column_family = _make_column_family("column_family_id", None, gc_rule=gc_rule)
+ pb_val = column_family.to_pb()
+ expected = _ColumnFamilyPB(gc_rule=gc_rule.to_pb())
+ assert pb_val == expected
+
+
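+# The helpers below keep the legacy _FakeStub wiring: a canned response is
+# attached to the transport attribute of the autospec'd admin client, and
+# the tests check that the call completes and returns None.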
+def _create_test_helper(gc_rule=None):
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_table_admin as table_admin_v2_pb2,
+ )
+ from tests.unit._testing import _FakeStub
+ from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ BigtableTableAdminClient,
+ )
+
+ project_id = "project-id"
+ zone = "zone"
+ cluster_id = "cluster-id"
+ table_id = "table-id"
+ column_family_id = "column-family-id"
+ table_name = (
+ "projects/"
+ + project_id
+ + "/zones/"
+ + zone
+ + "/clusters/"
+ + cluster_id
+ + "/tables/"
+ + table_id
+ )
+
+ api = mock.create_autospec(BigtableTableAdminClient)
+
+ credentials = _make_credentials()
+ client = _make_client(project=project_id, credentials=credentials, admin=True)
+ table = _Table(table_name, client=client)
+ column_family = _make_column_family(column_family_id, table, gc_rule=gc_rule)
+
+ # Create request_pb
+ if gc_rule is None:
+ column_family_pb = _ColumnFamilyPB()
+ else:
+ column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb())
+ request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name)
+ modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification()
+ modification.id = column_family_id
+ modification.create = column_family_pb
+ request_pb.modifications.append(modification)
+
+ # Create response_pb
+ response_pb = _ColumnFamilyPB()
+
+ # Patch the stub used by the API method.
+ stub = _FakeStub(response_pb)
+ client._table_admin_client = api
+ client._table_admin_client.transport.create = stub
+
+ # Create expected_result.
+ expected_result = None # create() has no return value.
+
+ # Perform the method and check the result.
+ assert stub.results == (response_pb,)
+ result = column_family.create()
+ assert result == expected_result
+
+
+def test_column_family_create():
+ _create_test_helper(gc_rule=None)
+
+
+def test_column_family_create_with_gc_rule():
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+ gc_rule = MaxVersionsGCRule(1337)
+ _create_test_helper(gc_rule=gc_rule)
+
+
+def _update_test_helper(gc_rule=None):
+ from tests.unit._testing import _FakeStub
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_table_admin as table_admin_v2_pb2,
+ )
+ from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ BigtableTableAdminClient,
+ )
+
+ project_id = "project-id"
+ zone = "zone"
+ cluster_id = "cluster-id"
+ table_id = "table-id"
+ column_family_id = "column-family-id"
+ table_name = (
+ "projects/"
+ + project_id
+ + "/zones/"
+ + zone
+ + "/clusters/"
+ + cluster_id
+ + "/tables/"
+ + table_id
+ )
+
+ api = mock.create_autospec(BigtableTableAdminClient)
+ credentials = _make_credentials()
+ client = _make_client(project=project_id, credentials=credentials, admin=True)
+ table = _Table(table_name, client=client)
+ column_family = _make_column_family(column_family_id, table, gc_rule=gc_rule)
+
+ # Create request_pb
+ if gc_rule is None:
+ column_family_pb = _ColumnFamilyPB()
+ else:
+ column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb())
+ request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name)
+ modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification()
+ modification.id = column_family_id
+ modification.update = column_family_pb
+ request_pb.modifications.append(modification)
+
+ # Create response_pb
+ response_pb = _ColumnFamilyPB()
+
+ # Patch the stub used by the API method.
+ stub = _FakeStub(response_pb)
+ client._table_admin_client = api
+ client._table_admin_client.transport.update = stub
+
+ # Create expected_result.
+ expected_result = None # update() has no return value.
+
+ # Perform the method and check the result.
+ assert stub.results == (response_pb,)
+ result = column_family.update()
+ assert result == expected_result
+
+
+def test_column_family_update():
+ _update_test_helper(gc_rule=None)
+
+
+def test_column_family_update_with_gc_rule():
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+ gc_rule = MaxVersionsGCRule(1337)
+ _update_test_helper(gc_rule=gc_rule)
+
+
+def test_column_family_delete():
+ from google.protobuf import empty_pb2
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_table_admin as table_admin_v2_pb2,
+ )
+ from tests.unit._testing import _FakeStub
+ from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ BigtableTableAdminClient,
+ )
+
+ project_id = "project-id"
+ zone = "zone"
+ cluster_id = "cluster-id"
+ table_id = "table-id"
+ column_family_id = "column-family-id"
+ table_name = (
+ "projects/"
+ + project_id
+ + "/zones/"
+ + zone
+ + "/clusters/"
+ + cluster_id
+ + "/tables/"
+ + table_id
+ )
+
+ api = mock.create_autospec(BigtableTableAdminClient)
+ credentials = _make_credentials()
+ client = _make_client(project=project_id, credentials=credentials, admin=True)
+ table = _Table(table_name, client=client)
+ column_family = _make_column_family(column_family_id, table)
+
+ # Create request_pb
+ request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name)
+ modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification(
+ id=column_family_id, drop=True
+ )
+ request_pb.modifications.append(modification)
+
+ # Create response_pb
+ response_pb = empty_pb2.Empty()
+
+ # Patch the stub used by the API method.
+ stub = _FakeStub(response_pb)
+ client._table_admin_client = api
+ client._table_admin_client.transport.delete = stub
+
+ # Create expected_result.
+ expected_result = None # delete() has no return value.
+
+ # Perform the method and check the result.
+ assert stub.results == (response_pb,)
+ result = column_family.delete()
+ assert result == expected_result
+
+
+def test__gc_rule_from_pb_empty():
+ from google.cloud.bigtable.column_family import _gc_rule_from_pb
+
+ gc_rule_pb = _GcRulePB()
+ assert _gc_rule_from_pb(gc_rule_pb) is None
+
+
+def test__gc_rule_from_pb_max_num_versions():
+ from google.cloud.bigtable.column_family import _gc_rule_from_pb
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+ orig_rule = MaxVersionsGCRule(1)
+ gc_rule_pb = orig_rule.to_pb()
+ result = _gc_rule_from_pb(gc_rule_pb)
+ assert isinstance(result, MaxVersionsGCRule)
+ assert result == orig_rule
+
+
+def test__gc_rule_from_pb_max_age():
+ import datetime
+ from google.cloud.bigtable.column_family import _gc_rule_from_pb
+ from google.cloud.bigtable.column_family import MaxAgeGCRule
+
+ orig_rule = MaxAgeGCRule(datetime.timedelta(seconds=1))
+ gc_rule_pb = orig_rule.to_pb()
+ result = _gc_rule_from_pb(gc_rule_pb)
+ assert isinstance(result, MaxAgeGCRule)
+ assert result == orig_rule
+
+
+def test__gc_rule_from_pb_union():
+ import datetime
+ from google.cloud.bigtable.column_family import _gc_rule_from_pb
+ from google.cloud.bigtable.column_family import GCRuleUnion
+ from google.cloud.bigtable.column_family import MaxAgeGCRule
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+ rule1 = MaxVersionsGCRule(1)
+ rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1))
+ orig_rule = GCRuleUnion([rule1, rule2])
+ gc_rule_pb = orig_rule.to_pb()
+ result = _gc_rule_from_pb(gc_rule_pb)
+ assert isinstance(result, GCRuleUnion)
+ assert result == orig_rule
+
+
+def test__gc_rule_from_pb_intersection():
+ import datetime
+ from google.cloud.bigtable.column_family import _gc_rule_from_pb
+ from google.cloud.bigtable.column_family import GCRuleIntersection
+ from google.cloud.bigtable.column_family import MaxAgeGCRule
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+ rule1 = MaxVersionsGCRule(1)
+ rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1))
+ orig_rule = GCRuleIntersection([rule1, rule2])
+ gc_rule_pb = orig_rule.to_pb()
+ result = _gc_rule_from_pb(gc_rule_pb)
+ assert isinstance(result, GCRuleIntersection)
+ assert result == orig_rule
+
+
+def test__gc_rule_from_pb_unknown_field_name():
+ from google.cloud.bigtable.column_family import _gc_rule_from_pb
+
+ class MockProto(object):
+
+ names = []
+
+ _pb = {}
- def test___ne__same_value(self):
- max_age = object()
- gc_rule1 = self._make_one(max_age=max_age)
- gc_rule2 = self._make_one(max_age=max_age)
- comparison_val = gc_rule1 != gc_rule2
- self.assertFalse(comparison_val)
+ @classmethod
+ def WhichOneof(cls, name):
+ cls.names.append(name)
+ return "unknown"
- def test_to_pb(self):
- import datetime
- from google.protobuf import duration_pb2
+ MockProto._pb = MockProto
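+    # _gc_rule_from_pb reaches the raw protobuf via ``._pb`` before calling
+    # ``WhichOneof("rule")``; aliasing ``_pb`` to the class itself lets this
+    # duck-typed stand-in satisfy both lookups.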
- max_age = datetime.timedelta(seconds=1)
- duration = duration_pb2.Duration(seconds=1)
- gc_rule = self._make_one(max_age=max_age)
- pb_val = gc_rule.to_pb()
- self.assertEqual(pb_val, _GcRulePB(max_age=duration))
+ assert MockProto.names == []
+ with pytest.raises(ValueError):
+ _gc_rule_from_pb(MockProto)
-class TestGCRuleUnion(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.column_family import GCRuleUnion
-
- return GCRuleUnion
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_constructor(self):
- rules = object()
- rule_union = self._make_one(rules)
- self.assertIs(rule_union.rules, rules)
-
- def test___eq__(self):
- rules = object()
- gc_rule1 = self._make_one(rules)
- gc_rule2 = self._make_one(rules)
- self.assertEqual(gc_rule1, gc_rule2)
-
- def test___eq__type_differ(self):
- rules = object()
- gc_rule1 = self._make_one(rules)
- gc_rule2 = object()
- self.assertNotEqual(gc_rule1, gc_rule2)
-
- def test___ne__same_value(self):
- rules = object()
- gc_rule1 = self._make_one(rules)
- gc_rule2 = self._make_one(rules)
- comparison_val = gc_rule1 != gc_rule2
- self.assertFalse(comparison_val)
-
- def test_to_pb(self):
- import datetime
- from google.protobuf import duration_pb2
- from google.cloud.bigtable.column_family import MaxAgeGCRule
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
-
- max_num_versions = 42
- rule1 = MaxVersionsGCRule(max_num_versions)
- pb_rule1 = _GcRulePB(max_num_versions=max_num_versions)
-
- max_age = datetime.timedelta(seconds=1)
- rule2 = MaxAgeGCRule(max_age)
- pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1))
-
- rule3 = self._make_one(rules=[rule1, rule2])
- pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2]))
-
- gc_rule_pb = rule3.to_pb()
- self.assertEqual(gc_rule_pb, pb_rule3)
-
- def test_to_pb_nested(self):
- import datetime
- from google.protobuf import duration_pb2
- from google.cloud.bigtable.column_family import MaxAgeGCRule
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
-
- max_num_versions1 = 42
- rule1 = MaxVersionsGCRule(max_num_versions1)
- pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1)
-
- max_age = datetime.timedelta(seconds=1)
- rule2 = MaxAgeGCRule(max_age)
- pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1))
-
- rule3 = self._make_one(rules=[rule1, rule2])
- pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2]))
-
- max_num_versions2 = 1337
- rule4 = MaxVersionsGCRule(max_num_versions2)
- pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2)
-
- rule5 = self._make_one(rules=[rule3, rule4])
- pb_rule5 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4]))
-
- gc_rule_pb = rule5.to_pb()
- self.assertEqual(gc_rule_pb, pb_rule5)
-
-
-class TestGCRuleIntersection(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.column_family import GCRuleIntersection
-
- return GCRuleIntersection
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_constructor(self):
- rules = object()
- rule_intersection = self._make_one(rules)
- self.assertIs(rule_intersection.rules, rules)
-
- def test___eq__(self):
- rules = object()
- gc_rule1 = self._make_one(rules)
- gc_rule2 = self._make_one(rules)
- self.assertEqual(gc_rule1, gc_rule2)
-
- def test___eq__type_differ(self):
- rules = object()
- gc_rule1 = self._make_one(rules)
- gc_rule2 = object()
- self.assertNotEqual(gc_rule1, gc_rule2)
-
- def test___ne__same_value(self):
- rules = object()
- gc_rule1 = self._make_one(rules)
- gc_rule2 = self._make_one(rules)
- comparison_val = gc_rule1 != gc_rule2
- self.assertFalse(comparison_val)
-
- def test_to_pb(self):
- import datetime
- from google.protobuf import duration_pb2
- from google.cloud.bigtable.column_family import MaxAgeGCRule
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
-
- max_num_versions = 42
- rule1 = MaxVersionsGCRule(max_num_versions)
- pb_rule1 = _GcRulePB(max_num_versions=max_num_versions)
-
- max_age = datetime.timedelta(seconds=1)
- rule2 = MaxAgeGCRule(max_age)
- pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1))
-
- rule3 = self._make_one(rules=[rule1, rule2])
- pb_rule3 = _GcRulePB(
- intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2])
- )
-
- gc_rule_pb = rule3.to_pb()
- self.assertEqual(gc_rule_pb, pb_rule3)
-
- def test_to_pb_nested(self):
- import datetime
- from google.protobuf import duration_pb2
- from google.cloud.bigtable.column_family import MaxAgeGCRule
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
-
- max_num_versions1 = 42
- rule1 = MaxVersionsGCRule(max_num_versions1)
- pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1)
-
- max_age = datetime.timedelta(seconds=1)
- rule2 = MaxAgeGCRule(max_age)
- pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1))
-
- rule3 = self._make_one(rules=[rule1, rule2])
- pb_rule3 = _GcRulePB(
- intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2])
- )
-
- max_num_versions2 = 1337
- rule4 = MaxVersionsGCRule(max_num_versions2)
- pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2)
-
- rule5 = self._make_one(rules=[rule3, rule4])
- pb_rule5 = _GcRulePB(
- intersection=_GcRuleIntersectionPB(rules=[pb_rule3, pb_rule4])
- )
-
- gc_rule_pb = rule5.to_pb()
- self.assertEqual(gc_rule_pb, pb_rule5)
-
-
-class TestColumnFamily(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.column_family import ColumnFamily
-
- return ColumnFamily
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- @staticmethod
- def _get_target_client_class():
- from google.cloud.bigtable.client import Client
-
- return Client
-
- def _make_client(self, *args, **kwargs):
- return self._get_target_client_class()(*args, **kwargs)
-
- def test_constructor(self):
- column_family_id = u"column-family-id"
- table = object()
- gc_rule = object()
- column_family = self._make_one(column_family_id, table, gc_rule=gc_rule)
-
- self.assertEqual(column_family.column_family_id, column_family_id)
- self.assertIs(column_family._table, table)
- self.assertIs(column_family.gc_rule, gc_rule)
-
- def test_name_property(self):
- column_family_id = u"column-family-id"
- table_name = "table_name"
- table = _Table(table_name)
- column_family = self._make_one(column_family_id, table)
-
- expected_name = table_name + "/columnFamilies/" + column_family_id
- self.assertEqual(column_family.name, expected_name)
-
- def test___eq__(self):
- column_family_id = "column_family_id"
- table = object()
- gc_rule = object()
- column_family1 = self._make_one(column_family_id, table, gc_rule=gc_rule)
- column_family2 = self._make_one(column_family_id, table, gc_rule=gc_rule)
- self.assertEqual(column_family1, column_family2)
-
- def test___eq__type_differ(self):
- column_family1 = self._make_one("column_family_id", None)
- column_family2 = object()
- self.assertNotEqual(column_family1, column_family2)
-
- def test___ne__same_value(self):
- column_family_id = "column_family_id"
- table = object()
- gc_rule = object()
- column_family1 = self._make_one(column_family_id, table, gc_rule=gc_rule)
- column_family2 = self._make_one(column_family_id, table, gc_rule=gc_rule)
- comparison_val = column_family1 != column_family2
- self.assertFalse(comparison_val)
-
- def test___ne__(self):
- column_family1 = self._make_one("column_family_id1", None)
- column_family2 = self._make_one("column_family_id2", None)
- self.assertNotEqual(column_family1, column_family2)
-
- def test_to_pb_no_rules(self):
- column_family = self._make_one("column_family_id", None)
- pb_val = column_family.to_pb()
- expected = _ColumnFamilyPB()
- self.assertEqual(pb_val, expected)
-
- def test_to_pb_with_rule(self):
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
-
- gc_rule = MaxVersionsGCRule(1)
- column_family = self._make_one("column_family_id", None, gc_rule=gc_rule)
- pb_val = column_family.to_pb()
- expected = _ColumnFamilyPB(gc_rule=gc_rule.to_pb())
- self.assertEqual(pb_val, expected)
-
- def _create_test_helper(self, gc_rule=None):
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_table_admin as table_admin_v2_pb2,
- )
- from tests.unit._testing import _FakeStub
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
- )
-
- project_id = "project-id"
- zone = "zone"
- cluster_id = "cluster-id"
- table_id = "table-id"
- column_family_id = "column-family-id"
- table_name = (
- "projects/"
- + project_id
- + "/zones/"
- + zone
- + "/clusters/"
- + cluster_id
- + "/tables/"
- + table_id
- )
-
- api = mock.create_autospec(BigtableTableAdminClient)
-
- credentials = _make_credentials()
- client = self._make_client(
- project=project_id, credentials=credentials, admin=True
- )
- table = _Table(table_name, client=client)
- column_family = self._make_one(column_family_id, table, gc_rule=gc_rule)
-
- # Create request_pb
- if gc_rule is None:
- column_family_pb = _ColumnFamilyPB()
- else:
- column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb())
- request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name)
- modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification()
- modification.id = column_family_id
- modification.create = column_family_pb
- request_pb.modifications.append(modification)
-
- # Create response_pb
- response_pb = _ColumnFamilyPB()
-
- # Patch the stub used by the API method.
- stub = _FakeStub(response_pb)
- client._table_admin_client = api
- client._table_admin_client.transport.create = stub
-
- # Create expected_result.
- expected_result = None # create() has no return value.
-
- # Perform the method and check the result.
- self.assertEqual(stub.results, (response_pb,))
- result = column_family.create()
- self.assertEqual(result, expected_result)
-
- def test_create(self):
- self._create_test_helper(gc_rule=None)
-
- def test_create_with_gc_rule(self):
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
-
- gc_rule = MaxVersionsGCRule(1337)
- self._create_test_helper(gc_rule=gc_rule)
-
- def _update_test_helper(self, gc_rule=None):
- from tests.unit._testing import _FakeStub
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_table_admin as table_admin_v2_pb2,
- )
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
- )
-
- project_id = "project-id"
- zone = "zone"
- cluster_id = "cluster-id"
- table_id = "table-id"
- column_family_id = "column-family-id"
- table_name = (
- "projects/"
- + project_id
- + "/zones/"
- + zone
- + "/clusters/"
- + cluster_id
- + "/tables/"
- + table_id
- )
-
- api = mock.create_autospec(BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=project_id, credentials=credentials, admin=True
- )
- table = _Table(table_name, client=client)
- column_family = self._make_one(column_family_id, table, gc_rule=gc_rule)
-
- # Create request_pb
- if gc_rule is None:
- column_family_pb = _ColumnFamilyPB()
- else:
- column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb())
- request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name)
- modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification()
- modification.id = column_family_id
- modification.update = column_family_pb
- request_pb.modifications.append(modification)
-
- # Create response_pb
- response_pb = _ColumnFamilyPB()
-
- # Patch the stub used by the API method.
- stub = _FakeStub(response_pb)
- client._table_admin_client = api
- client._table_admin_client.transport.update = stub
-
- # Create expected_result.
- expected_result = None # update() has no return value.
-
- # Perform the method and check the result.
- self.assertEqual(stub.results, (response_pb,))
- result = column_family.update()
- self.assertEqual(result, expected_result)
-
- def test_update(self):
- self._update_test_helper(gc_rule=None)
-
- def test_update_with_gc_rule(self):
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
-
- gc_rule = MaxVersionsGCRule(1337)
- self._update_test_helper(gc_rule=gc_rule)
-
- def test_delete(self):
- from google.protobuf import empty_pb2
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_table_admin as table_admin_v2_pb2,
- )
- from tests.unit._testing import _FakeStub
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
- )
-
- project_id = "project-id"
- zone = "zone"
- cluster_id = "cluster-id"
- table_id = "table-id"
- column_family_id = "column-family-id"
- table_name = (
- "projects/"
- + project_id
- + "/zones/"
- + zone
- + "/clusters/"
- + cluster_id
- + "/tables/"
- + table_id
- )
-
- api = mock.create_autospec(BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=project_id, credentials=credentials, admin=True
- )
- table = _Table(table_name, client=client)
- column_family = self._make_one(column_family_id, table)
-
- # Create request_pb
- request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name)
- modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification(
- id=column_family_id, drop=True
- )
- request_pb.modifications.append(modification)
-
- # Create response_pb
- response_pb = empty_pb2.Empty()
-
- # Patch the stub used by the API method.
- stub = _FakeStub(response_pb)
- client._table_admin_client = api
- client._table_admin_client.transport.delete = stub
-
- # Create expected_result.
- expected_result = None # delete() has no return value.
-
- # Perform the method and check the result.
- self.assertEqual(stub.results, (response_pb,))
- result = column_family.delete()
- self.assertEqual(result, expected_result)
-
-
-class Test__gc_rule_from_pb(unittest.TestCase):
- def _call_fut(self, *args, **kwargs):
- from google.cloud.bigtable.column_family import _gc_rule_from_pb
-
- return _gc_rule_from_pb(*args, **kwargs)
-
- def test_empty(self):
-
- gc_rule_pb = _GcRulePB()
- self.assertIsNone(self._call_fut(gc_rule_pb))
-
- def test_max_num_versions(self):
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
-
- orig_rule = MaxVersionsGCRule(1)
- gc_rule_pb = orig_rule.to_pb()
- result = self._call_fut(gc_rule_pb)
- self.assertIsInstance(result, MaxVersionsGCRule)
- self.assertEqual(result, orig_rule)
-
- def test_max_age(self):
- import datetime
- from google.cloud.bigtable.column_family import MaxAgeGCRule
-
- orig_rule = MaxAgeGCRule(datetime.timedelta(seconds=1))
- gc_rule_pb = orig_rule.to_pb()
- result = self._call_fut(gc_rule_pb)
- self.assertIsInstance(result, MaxAgeGCRule)
- self.assertEqual(result, orig_rule)
-
- def test_union(self):
- import datetime
- from google.cloud.bigtable.column_family import GCRuleUnion
- from google.cloud.bigtable.column_family import MaxAgeGCRule
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
-
- rule1 = MaxVersionsGCRule(1)
- rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1))
- orig_rule = GCRuleUnion([rule1, rule2])
- gc_rule_pb = orig_rule.to_pb()
- result = self._call_fut(gc_rule_pb)
- self.assertIsInstance(result, GCRuleUnion)
- self.assertEqual(result, orig_rule)
-
- def test_intersection(self):
- import datetime
- from google.cloud.bigtable.column_family import GCRuleIntersection
- from google.cloud.bigtable.column_family import MaxAgeGCRule
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
-
- rule1 = MaxVersionsGCRule(1)
- rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1))
- orig_rule = GCRuleIntersection([rule1, rule2])
- gc_rule_pb = orig_rule.to_pb()
- result = self._call_fut(gc_rule_pb)
- self.assertIsInstance(result, GCRuleIntersection)
- self.assertEqual(result, orig_rule)
-
- def test_unknown_field_name(self):
- class MockProto(object):
-
- names = []
-
- _pb = {}
-
- @classmethod
- def WhichOneof(cls, name):
- cls.names.append(name)
- return "unknown"
-
- MockProto._pb = MockProto
-
- self.assertEqual(MockProto.names, [])
- self.assertRaises(ValueError, self._call_fut, MockProto)
- self.assertEqual(MockProto.names, ["rule"])
+ assert MockProto.names == ["rule"]
def _GcRulePB(*args, **kw):
diff --git a/tests/unit/test_encryption_info.py b/tests/unit/test_encryption_info.py
index ede6f4883..8b92a83ed 100644
--- a/tests/unit/test_encryption_info.py
+++ b/tests/unit/test_encryption_info.py
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import unittest
-
import mock
from google.cloud.bigtable import enums
@@ -55,113 +53,119 @@ def _make_info_pb(
)
-class TestEncryptionInfo(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.encryption_info import EncryptionInfo
+def _make_encryption_info(*args, **kwargs):
+ from google.cloud.bigtable.encryption_info import EncryptionInfo
+
+ return EncryptionInfo(*args, **kwargs)
+
+
+def _make_encryption_info_defaults(
+ encryption_type=EncryptionType.GOOGLE_DEFAULT_ENCRYPTION,
+ code=_STATUS_CODE,
+ message=_STATUS_MESSAGE,
+ kms_key_version=_KMS_KEY_VERSION,
+):
+ encryption_status = _make_status(code=code, message=message)
+ return _make_encryption_info(encryption_type, encryption_status, kms_key_version)
+
+
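+# _make_encryption_info_defaults builds a fully populated value object, so
+# each __eq__/__ne__ case below only has to vary a single field.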
+def test_encryption_info__from_pb():
+ from google.cloud.bigtable.encryption_info import EncryptionInfo
- return EncryptionInfo
+ info_pb = _make_info_pb()
- def _make_one(self, encryption_type, encryption_status, kms_key_version):
- return self._get_target_class()(
- encryption_type, encryption_status, kms_key_version,
- )
+ info = EncryptionInfo._from_pb(info_pb)
- def _make_one_defaults(
- self,
- encryption_type=EncryptionType.GOOGLE_DEFAULT_ENCRYPTION,
- code=_STATUS_CODE,
- message=_STATUS_MESSAGE,
+ assert info.encryption_type == EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
+ assert info.encryption_status.code == _STATUS_CODE
+ assert info.encryption_status.message == _STATUS_MESSAGE
+ assert info.kms_key_version == _KMS_KEY_VERSION
+
+
+def test_encryption_info_ctor():
+ encryption_type = EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
+ encryption_status = _make_status()
+
+ info = _make_encryption_info(
+ encryption_type=encryption_type,
+ encryption_status=encryption_status,
kms_key_version=_KMS_KEY_VERSION,
- ):
- encryption_status = _make_status(code=code, message=message)
- return self._make_one(encryption_type, encryption_status, kms_key_version)
-
- def test__from_pb(self):
- klass = self._get_target_class()
- info_pb = _make_info_pb()
-
- info = klass._from_pb(info_pb)
-
- self.assertEqual(
- info.encryption_type, EncryptionType.GOOGLE_DEFAULT_ENCRYPTION,
- )
- self.assertEqual(info.encryption_status.code, _STATUS_CODE)
- self.assertEqual(info.encryption_status.message, _STATUS_MESSAGE)
- self.assertEqual(info.kms_key_version, _KMS_KEY_VERSION)
-
- def test_ctor(self):
- encryption_type = EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
- encryption_status = _make_status()
-
- info = self._make_one(
- encryption_type=encryption_type,
- encryption_status=encryption_status,
- kms_key_version=_KMS_KEY_VERSION,
- )
-
- self.assertEqual(info.encryption_type, encryption_type)
- self.assertEqual(info.encryption_status, encryption_status)
- self.assertEqual(info.kms_key_version, _KMS_KEY_VERSION)
-
- def test___eq___identity(self):
- info = self._make_one_defaults()
- self.assertTrue(info == info)
-
- def test___eq___wrong_type(self):
- info = self._make_one_defaults()
- other = object()
- self.assertFalse(info == other)
-
- def test___eq___same_values(self):
- info = self._make_one_defaults()
- other = self._make_one_defaults()
- self.assertTrue(info == other)
-
- def test___eq___different_encryption_type(self):
- info = self._make_one_defaults()
- other = self._make_one_defaults(
- encryption_type=EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
- )
- self.assertFalse(info == other)
-
- def test___eq___different_encryption_status(self):
- info = self._make_one_defaults()
- other = self._make_one_defaults(code=456)
- self.assertFalse(info == other)
-
- def test___eq___different_kms_key_version(self):
- info = self._make_one_defaults()
- other = self._make_one_defaults(kms_key_version=789)
- self.assertFalse(info == other)
-
- def test___ne___identity(self):
- info = self._make_one_defaults()
- self.assertFalse(info != info)
-
- def test___ne___wrong_type(self):
- info = self._make_one_defaults()
- other = object()
- self.assertTrue(info != other)
-
- def test___ne___same_values(self):
- info = self._make_one_defaults()
- other = self._make_one_defaults()
- self.assertFalse(info != other)
-
- def test___ne___different_encryption_type(self):
- info = self._make_one_defaults()
- other = self._make_one_defaults(
- encryption_type=EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
- )
- self.assertTrue(info != other)
-
- def test___ne___different_encryption_status(self):
- info = self._make_one_defaults()
- other = self._make_one_defaults(code=456)
- self.assertTrue(info != other)
-
- def test___ne___different_kms_key_version(self):
- info = self._make_one_defaults()
- other = self._make_one_defaults(kms_key_version=789)
- self.assertTrue(info != other)
+ )
+
+ assert info.encryption_type == encryption_type
+ assert info.encryption_status == encryption_status
+ assert info.kms_key_version == _KMS_KEY_VERSION
+
+
+def test_encryption_info___eq___identity():
+ info = _make_encryption_info_defaults()
+ assert info == info
+
+
+def test_encryption_info___eq___wrong_type():
+ info = _make_encryption_info_defaults()
+ other = object()
+ assert not (info == other)
+
+
+def test_encryption_info___eq___same_values():
+ info = _make_encryption_info_defaults()
+ other = _make_encryption_info_defaults()
+ assert info == other
+
+
+def test_encryption_info___eq___different_encryption_type():
+ info = _make_encryption_info_defaults()
+ other = _make_encryption_info_defaults(
+ encryption_type=EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
+ )
+ assert not (info == other)
+
+
+def test_encryption_info___eq___different_encryption_status():
+ info = _make_encryption_info_defaults()
+ other = _make_encryption_info_defaults(code=456)
+ assert not (info == other)
+
+
+def test_encryption_info___eq___different_kms_key_version():
+ info = _make_encryption_info_defaults()
+ other = _make_encryption_info_defaults(kms_key_version=789)
+ assert not (info == other)
+
+
+def test_encryption_info___ne___identity():
+ info = _make_encryption_info_defaults()
+ assert not (info != info)
+
+
+def test_encryption_info___ne___wrong_type():
+ info = _make_encryption_info_defaults()
+ other = object()
+ assert info != other
+
+
+def test_encryption_info___ne___same_values():
+ info = _make_encryption_info_defaults()
+ other = _make_encryption_info_defaults()
+ assert not (info != other)
+
+
+def test_encryption_info___ne___different_encryption_type():
+ info = _make_encryption_info_defaults()
+ other = _make_encryption_info_defaults(
+ encryption_type=EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
+ )
+ assert info != other
+
+
+def test_encryption_info___ne___different_encryption_status():
+ info = _make_encryption_info_defaults()
+ other = _make_encryption_info_defaults(code=456)
+ assert info != other
+
+
+def test_encryption_info___ne___different_kms_key_version():
+ info = _make_encryption_info_defaults()
+ other = _make_encryption_info_defaults(kms_key_version=789)
+ assert info != other
diff --git a/tests/unit/test_error.py b/tests/unit/test_error.py
index c53d63991..8b148473c 100644
--- a/tests/unit/test_error.py
+++ b/tests/unit/test_error.py
@@ -12,86 +12,90 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import unittest
-
-
-class TestStatus(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.error import Status
-
- return Status
-
- @staticmethod
- def _make_status_pb(**kwargs):
- from google.rpc.status_pb2 import Status
-
- return Status(**kwargs)
-
- def _make_one(self, status_pb):
- return self._get_target_class()(status_pb)
-
- def test_ctor(self):
- status_pb = self._make_status_pb()
- status = self._make_one(status_pb)
- self.assertIs(status.status_pb, status_pb)
-
- def test_code(self):
- code = 123
- status_pb = self._make_status_pb(code=code)
- status = self._make_one(status_pb)
- self.assertEqual(status.code, code)
-
- def test_message(self):
- message = "message"
- status_pb = self._make_status_pb(message=message)
- status = self._make_one(status_pb)
- self.assertEqual(status.message, message)
-
- def test___eq___self(self):
- status_pb = self._make_status_pb()
- status = self._make_one(status_pb)
- self.assertTrue(status == status)
-
- def test___eq___other_hit(self):
- status_pb = self._make_status_pb(code=123, message="message")
- status = self._make_one(status_pb)
- other = self._make_one(status_pb)
- self.assertTrue(status == other)
-
- def test___eq___other_miss(self):
- status_pb = self._make_status_pb(code=123, message="message")
- other_status_pb = self._make_status_pb(code=456, message="oops")
- status = self._make_one(status_pb)
- other = self._make_one(other_status_pb)
- self.assertFalse(status == other)
-
- def test___eq___wrong_type(self):
- status_pb = self._make_status_pb(code=123, message="message")
- status = self._make_one(status_pb)
- other = object()
- self.assertFalse(status == other)
-
- def test___ne___self(self):
- status_pb = self._make_status_pb()
- status = self._make_one(status_pb)
- self.assertFalse(status != status)
-
- def test___ne___other_hit(self):
- status_pb = self._make_status_pb(code=123, message="message")
- status = self._make_one(status_pb)
- other = self._make_one(status_pb)
- self.assertFalse(status != other)
-
- def test___ne___other_miss(self):
- status_pb = self._make_status_pb(code=123, message="message")
- other_status_pb = self._make_status_pb(code=456, message="oops")
- status = self._make_one(status_pb)
- other = self._make_one(other_status_pb)
- self.assertTrue(status != other)
-
- def test___ne___wrong_type(self):
- status_pb = self._make_status_pb(code=123, message="message")
- status = self._make_one(status_pb)
- other = object()
- self.assertTrue(status != other)
+
+def _make_status_pb(**kwargs):
+ from google.rpc.status_pb2 import Status
+
+ return Status(**kwargs)
+
+
+def _make_status(status_pb):
+ from google.cloud.bigtable.error import Status
+
+ return Status(status_pb)
+
+
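+# Status wraps a google.rpc.status_pb2.Status; the tests cover the
+# pass-through properties plus both equality operators against hits,
+# misses, and foreign types.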
+def test_status_ctor():
+ status_pb = _make_status_pb()
+ status = _make_status(status_pb)
+ assert status.status_pb is status_pb
+
+
+def test_status_code():
+ code = 123
+ status_pb = _make_status_pb(code=code)
+ status = _make_status(status_pb)
+ assert status.code == code
+
+
+def test_status_message():
+ message = "message"
+ status_pb = _make_status_pb(message=message)
+ status = _make_status(status_pb)
+ assert status.message == message
+
+
+def test_status___eq___self():
+ status_pb = _make_status_pb()
+ status = _make_status(status_pb)
+ assert status == status
+
+
+def test_status___eq___other_hit():
+ status_pb = _make_status_pb(code=123, message="message")
+ status = _make_status(status_pb)
+ other = _make_status(status_pb)
+ assert status == other
+
+
+def test_status___eq___other_miss():
+ status_pb = _make_status_pb(code=123, message="message")
+ other_status_pb = _make_status_pb(code=456, message="oops")
+ status = _make_status(status_pb)
+ other = _make_status(other_status_pb)
+ assert not (status == other)
+
+
+def test_status___eq___wrong_type():
+ status_pb = _make_status_pb(code=123, message="message")
+ status = _make_status(status_pb)
+ other = object()
+ assert not (status == other)
+
+
+def test_status___ne___self():
+ status_pb = _make_status_pb()
+ status = _make_status(status_pb)
+ assert not (status != status)
+
+
+def test_status___ne___other_hit():
+ status_pb = _make_status_pb(code=123, message="message")
+ status = _make_status(status_pb)
+ other = _make_status(status_pb)
+ assert not (status != other)
+
+
+def test_status___ne___other_miss():
+ status_pb = _make_status_pb(code=123, message="message")
+ other_status_pb = _make_status_pb(code=456, message="oops")
+ status = _make_status(status_pb)
+ other = _make_status(other_status_pb)
+ assert status != other
+
+
+def test_status___ne___wrong_type():
+ status_pb = _make_status_pb(code=123, message="message")
+ status = _make_status(status_pb)
+ other = object()
+ assert status != other
diff --git a/tests/unit/test_instance.py b/tests/unit/test_instance.py
index e493fd9c8..def7e3e38 100644
--- a/tests/unit/test_instance.py
+++ b/tests/unit/test_instance.py
@@ -13,1014 +13,919 @@
# limitations under the License.
-import unittest
-
import mock
+import pytest
from ._testing import _make_credentials
from google.cloud.bigtable.cluster import Cluster
+PROJECT = "project"
+INSTANCE_ID = "instance-id"
+INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID
+LOCATION_ID = "locid"
+LOCATION = "projects/" + PROJECT + "/locations/" + LOCATION_ID
+APP_PROFILE_PATH = "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/appProfiles/"
+DISPLAY_NAME = "display_name"
+LABELS = {"foo": "bar"}
+OP_ID = 8915
+OP_NAME = "operations/projects/{}/instances/{}/operations/{}".format(
+    PROJECT, INSTANCE_ID, OP_ID
+)
+TABLE_ID = "table_id"
+TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
+CLUSTER_ID = "cluster-id"
+CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID
+BACKUP_ID = "backup-id"
+BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID
+
+APP_PROFILE_ID_1 = "app-profile-id-1"
+DESCRIPTION_1 = "routing policy any"
+APP_PROFILE_ID_2 = "app-profile-id-2"
+DESCRIPTION_2 = "routing policy single"
+ALLOW_WRITES = True
+
+
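+# The constants above are hoisted to module scope so the converted test
+# functions can share them without a TestCase instance.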
+def _make_client(*args, **kwargs):
+ from google.cloud.bigtable.client import Client
+
+ return Client(*args, **kwargs)
+
+
+def _make_instance_admin_api():
+ from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+ BigtableInstanceAdminClient,
+ )
+
+ return mock.create_autospec(BigtableInstanceAdminClient)
+
+
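+# NOTE: _make_instance_admin_api uses mock.create_autospec, which copies
+# the real client's method signatures so miscalled mocks fail loudly.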
+def _make_instance(*args, **kwargs):
+ from google.cloud.bigtable.instance import Instance
+
+ return Instance(*args, **kwargs)
+
+
+def test_instance_constructor_defaults():
+
+ client = object()
+ instance = _make_instance(INSTANCE_ID, client)
+ assert instance.instance_id == INSTANCE_ID
+ assert instance.display_name == INSTANCE_ID
+ assert instance.type_ is None
+ assert instance.labels is None
+ assert instance._client is client
+ assert instance.state is None
+
+
+def test_instance_constructor_non_default():
+ from google.cloud.bigtable import enums
+
+ instance_type = enums.Instance.Type.DEVELOPMENT
+ state = enums.Instance.State.READY
+ labels = {"test": "test"}
+ client = object()
+
+ instance = _make_instance(
+ INSTANCE_ID,
+ client,
+ display_name=DISPLAY_NAME,
+ instance_type=instance_type,
+ labels=labels,
+ _state=state,
+ )
+ assert instance.instance_id == INSTANCE_ID
+ assert instance.display_name == DISPLAY_NAME
+ assert instance.type_ == instance_type
+ assert instance.labels == labels
+ assert instance._client is client
+ assert instance.state == state
+
+
+def test_instance__update_from_pb_success():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable import enums
+
+ instance_type = data_v2_pb2.Instance.Type.PRODUCTION
+ state = enums.Instance.State.READY
+ # TODO: rename "type" to "type_"?
+ instance_pb = data_v2_pb2.Instance(
+ display_name=DISPLAY_NAME, type_=instance_type, labels=LABELS, state=state,
+ )
+
+ instance = _make_instance(None, None)
+ assert instance.display_name is None
+ assert instance.type_ is None
+ assert instance.labels is None
+ instance._update_from_pb(instance_pb._pb)
+ assert instance.display_name == DISPLAY_NAME
+ assert instance.type_ == instance_type
+ assert instance.labels == LABELS
+ assert instance._state == state
+
+
+def test_instance__update_from_pb_success_defaults():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable import enums
+
+ instance_pb = data_v2_pb2.Instance(display_name=DISPLAY_NAME)
+
+ instance = _make_instance(None, None)
+ assert instance.display_name is None
+ assert instance.type_ is None
+ assert instance.labels is None
+ instance._update_from_pb(instance_pb._pb)
+ assert instance.display_name == DISPLAY_NAME
+ assert instance.type_ == enums.Instance.Type.UNSPECIFIED
+ assert not instance.labels
+
+
+def test_instance__update_from_pb_wo_display_name():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+
+ instance_pb = data_v2_pb2.Instance()
+ instance = _make_instance(None, None)
+ assert instance.display_name is None
+
+ with pytest.raises(ValueError):
+ instance._update_from_pb(instance_pb)
+
+
+def test_instance_from_pb_success():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable import enums
+ from google.cloud.bigtable.instance import Instance
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance_type = enums.Instance.Type.PRODUCTION
+ state = enums.Instance.State.READY
+ instance_pb = data_v2_pb2.Instance(
+ name=INSTANCE_NAME,
+ display_name=INSTANCE_ID,
+ type_=instance_type,
+ labels=LABELS,
+ state=state,
+ )
+
+ instance = Instance.from_pb(instance_pb, client)
+
+ assert isinstance(instance, Instance)
+ assert instance._client == client
+ assert instance.instance_id == INSTANCE_ID
+ assert instance.display_name == INSTANCE_ID
+ assert instance.type_ == instance_type
+ assert instance.labels == LABELS
+ assert instance._state == state
+
+
+def test_instance_from_pb_bad_instance_name():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.instance import Instance
+
+ instance_name = "INCORRECT_FORMAT"
+ instance_pb = data_v2_pb2.Instance(name=instance_name)
+
+ with pytest.raises(ValueError):
+ Instance.from_pb(instance_pb, None)
+
+
+def test_instance_from_pb_project_mismatch():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.instance import Instance
+
+ ALT_PROJECT = "ALT_PROJECT"
+ credentials = _make_credentials()
+ client = _make_client(project=ALT_PROJECT, credentials=credentials, admin=True)
-class TestInstance(unittest.TestCase):
+ instance_pb = data_v2_pb2.Instance(name=INSTANCE_NAME)
+
+ with pytest.raises(ValueError):
+ Instance.from_pb(instance_pb, client)
+
+
+def test_instance_name():
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+
+ api = client._instance_admin_client = _make_instance_admin_api()
+ api.instance_path.return_value = INSTANCE_NAME
+ instance = _make_instance(INSTANCE_ID, client)
+
+ assert instance.name == INSTANCE_NAME
+
+
+def test_instance___eq__():
+ client = object()
+ instance1 = _make_instance(INSTANCE_ID, client)
+ instance2 = _make_instance(INSTANCE_ID, client)
+ assert instance1 == instance2
+
+
+def test_instance___eq__type_differ():
+ client = object()
+ instance1 = _make_instance(INSTANCE_ID, client)
+ instance2 = object()
+ assert instance1 != instance2
+
+
+def test_instance___ne__same_value():
+ client = object()
+ instance1 = _make_instance(INSTANCE_ID, client)
+ instance2 = _make_instance(INSTANCE_ID, client)
+ assert not (instance1 != instance2)
+
+
+def test_instance___ne__():
+ instance1 = _make_instance("instance_id1", "client1")
+ instance2 = _make_instance("instance_id2", "client2")
+ assert instance1 != instance2
+
+
+def test_instance_create_w_location_and_clusters():
+ instance = _make_instance(INSTANCE_ID, None)
+
+ with pytest.raises(ValueError):
+ instance.create(location_id=LOCATION_ID, clusters=[object(), object()])
+
+
+def test_instance_create_w_serve_nodes_and_clusters():
+ instance = _make_instance(INSTANCE_ID, None)
+
+ with pytest.raises(ValueError):
+ instance.create(serve_nodes=3, clusters=[object(), object()])
+
+
+def test_instance_create_w_default_storage_type_and_clusters():
+ instance = _make_instance(INSTANCE_ID, None)
+
+ with pytest.raises(ValueError):
+ instance.create(default_storage_type=1, clusters=[object(), object()])
+
+
+def _instance_api_response_for_create():
+ import datetime
+ from google.api_core import operation
+ from google.longrunning import operations_pb2
+ from google.protobuf.any_pb2 import Any
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_instance_admin as messages_v2_pb2,
+ )
+ from google.cloud.bigtable_admin_v2.types import instance
- PROJECT = "project"
- INSTANCE_ID = "instance-id"
- INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID
- LOCATION_ID = "locid"
- LOCATION = "projects/" + PROJECT + "/locations/" + LOCATION_ID
- APP_PROFILE_PATH = (
- "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/appProfiles/"
+ NOW = datetime.datetime.utcnow()
+ NOW_PB = _datetime_to_pb_timestamp(NOW)
+ metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB)
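+ # Pack the metadata into an Any, as a real long-running Operation response would carry it.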
+ type_url = "type.googleapis.com/{}".format(
+ messages_v2_pb2.CreateInstanceMetadata._meta._pb.DESCRIPTOR.full_name
)
- DISPLAY_NAME = "display_name"
- LABELS = {"foo": "bar"}
- OP_ID = 8915
- OP_NAME = "operations/projects/{}/instances/{}operations/{}".format(
- PROJECT, INSTANCE_ID, OP_ID
+ response_pb = operations_pb2.Operation(
+ name=OP_NAME,
+ metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
)
- TABLE_ID = "table_id"
- TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
- CLUSTER_ID = "cluster-id"
- CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID
- BACKUP_ID = "backup-id"
- BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID
-
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.instance import Instance
-
- return Instance
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- @staticmethod
- def _get_target_client_class():
- from google.cloud.bigtable.client import Client
-
- return Client
-
- def _make_client(self, *args, **kwargs):
- return self._get_target_client_class()(*args, **kwargs)
-
- def test_constructor_defaults(self):
-
- client = object()
- instance = self._make_one(self.INSTANCE_ID, client)
- self.assertEqual(instance.instance_id, self.INSTANCE_ID)
- self.assertEqual(instance.display_name, self.INSTANCE_ID)
- self.assertIsNone(instance.type_)
- self.assertIsNone(instance.labels)
- self.assertIs(instance._client, client)
- self.assertIsNone(instance.state)
-
- def test_constructor_non_default(self):
- from google.cloud.bigtable import enums
-
- instance_type = enums.Instance.Type.DEVELOPMENT
- state = enums.Instance.State.READY
- labels = {"test": "test"}
- client = object()
-
- instance = self._make_one(
- self.INSTANCE_ID,
- client,
- display_name=self.DISPLAY_NAME,
- instance_type=instance_type,
- labels=labels,
- _state=state,
- )
- self.assertEqual(instance.instance_id, self.INSTANCE_ID)
- self.assertEqual(instance.display_name, self.DISPLAY_NAME)
- self.assertEqual(instance.type_, instance_type)
- self.assertEqual(instance.labels, labels)
- self.assertIs(instance._client, client)
- self.assertEqual(instance.state, state)
-
- def test__update_from_pb_success(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable import enums
-
- instance_type = data_v2_pb2.Instance.Type.PRODUCTION
- state = enums.Instance.State.READY
- # todo type to type_?
- instance_pb = data_v2_pb2.Instance(
- display_name=self.DISPLAY_NAME,
- type_=instance_type,
- labels=self.LABELS,
- state=state,
- )
-
- instance = self._make_one(None, None)
- self.assertIsNone(instance.display_name)
- self.assertIsNone(instance.type_)
- self.assertIsNone(instance.labels)
- instance._update_from_pb(instance_pb._pb)
- self.assertEqual(instance.display_name, self.DISPLAY_NAME)
- self.assertEqual(instance.type_, instance_type)
- self.assertEqual(instance.labels, self.LABELS)
- self.assertEqual(instance._state, state)
-
- def test__update_from_pb_success_defaults(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable import enums
-
- instance_pb = data_v2_pb2.Instance(display_name=self.DISPLAY_NAME)
-
- instance = self._make_one(None, None)
- self.assertIsNone(instance.display_name)
- self.assertIsNone(instance.type_)
- self.assertIsNone(instance.labels)
- instance._update_from_pb(instance_pb._pb)
- self.assertEqual(instance.display_name, self.DISPLAY_NAME)
- self.assertEqual(instance.type_, enums.Instance.Type.UNSPECIFIED)
- self.assertFalse(instance.labels)
-
- def test__update_from_pb_no_display_name(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
-
- instance_pb = data_v2_pb2.Instance()
- instance = self._make_one(None, None)
- self.assertIsNone(instance.display_name)
- with self.assertRaises(ValueError):
- instance._update_from_pb(instance_pb)
-
- def test_from_pb_success(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable import enums
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance_type = enums.Instance.Type.PRODUCTION
- state = enums.Instance.State.READY
- instance_pb = data_v2_pb2.Instance(
- name=self.INSTANCE_NAME,
- display_name=self.INSTANCE_ID,
- type_=instance_type,
- labels=self.LABELS,
- state=state,
- )
-
- klass = self._get_target_class()
- instance = klass.from_pb(instance_pb, client)
- self.assertIsInstance(instance, klass)
- self.assertEqual(instance._client, client)
- self.assertEqual(instance.instance_id, self.INSTANCE_ID)
- self.assertEqual(instance.display_name, self.INSTANCE_ID)
- self.assertEqual(instance.type_, instance_type)
- self.assertEqual(instance.labels, self.LABELS)
- self.assertEqual(instance._state, state)
-
- def test_from_pb_bad_instance_name(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
-
- instance_name = "INCORRECT_FORMAT"
- instance_pb = data_v2_pb2.Instance(name=instance_name)
-
- klass = self._get_target_class()
- with self.assertRaises(ValueError):
- klass.from_pb(instance_pb, None)
-
- def test_from_pb_project_mistmatch(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
-
- ALT_PROJECT = "ALT_PROJECT"
- credentials = _make_credentials()
- client = self._make_client(
- project=ALT_PROJECT, credentials=credentials, admin=True
- )
-
- self.assertNotEqual(self.PROJECT, ALT_PROJECT)
-
- instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME)
-
- klass = self._get_target_class()
- with self.assertRaises(ValueError):
- klass.from_pb(instance_pb, client)
-
- def test_name_property(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
-
- api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
-
- api.instance_path.return_value = "projects/project/instances/instance-id"
- # Patch the the API method.
- client._instance_admin_client = api
-
- instance = self._make_one(self.INSTANCE_ID, client)
- self.assertEqual(instance.name, self.INSTANCE_NAME)
-
- def test___eq__(self):
- client = object()
- instance1 = self._make_one(self.INSTANCE_ID, client)
- instance2 = self._make_one(self.INSTANCE_ID, client)
- self.assertEqual(instance1, instance2)
-
- def test___eq__type_differ(self):
- client = object()
- instance1 = self._make_one(self.INSTANCE_ID, client)
- instance2 = object()
- self.assertNotEqual(instance1, instance2)
-
- def test___ne__same_value(self):
- client = object()
- instance1 = self._make_one(self.INSTANCE_ID, client)
- instance2 = self._make_one(self.INSTANCE_ID, client)
- comparison_val = instance1 != instance2
- self.assertFalse(comparison_val)
-
- def test___ne__(self):
- instance1 = self._make_one("instance_id1", "client1")
- instance2 = self._make_one("instance_id2", "client2")
- self.assertNotEqual(instance1, instance2)
-
- def test_create_check_location_and_clusters(self):
- instance = self._make_one(self.INSTANCE_ID, None)
-
- with self.assertRaises(ValueError):
- instance.create(location_id=self.LOCATION_ID, clusters=[object(), object()])
-
- def test_create_check_serve_nodes_and_clusters(self):
- instance = self._make_one(self.INSTANCE_ID, None)
-
- with self.assertRaises(ValueError):
- instance.create(serve_nodes=3, clusters=[object(), object()])
-
- def test_create_check_default_storage_type_and_clusters(self):
- instance = self._make_one(self.INSTANCE_ID, None)
-
- with self.assertRaises(ValueError):
- instance.create(default_storage_type=1, clusters=[object(), object()])
-
- def _instance_api_response_for_create(self):
- import datetime
- from google.api_core import operation
- from google.longrunning import operations_pb2
- from google.protobuf.any_pb2 import Any
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_instance_admin as messages_v2_pb2,
- )
- from google.cloud.bigtable_admin_v2.types import instance
-
- NOW = datetime.datetime.utcnow()
- NOW_PB = _datetime_to_pb_timestamp(NOW)
- metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB)
- type_url = "type.googleapis.com/{}".format(
- messages_v2_pb2.CreateInstanceMetadata._meta._pb.DESCRIPTOR.full_name
- )
- response_pb = operations_pb2.Operation(
- name=self.OP_NAME,
- metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
- )
- response = operation.from_gapic(
- response_pb,
- mock.Mock(),
- instance.Instance,
- metadata_type=messages_v2_pb2.CreateInstanceMetadata,
- )
- project_path_template = "projects/{}"
- location_path_template = "projects/{}/locations/{}"
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- instance_api.create_instance.return_value = response
- instance_api.project_path = project_path_template.format
- instance_api.location_path = location_path_template.format
- instance_api.common_location_path = location_path_template.format
- return instance_api, response
-
- def test_create(self):
- from google.cloud.bigtable import enums
- from google.cloud.bigtable_admin_v2.types import Instance
- from google.cloud.bigtable_admin_v2.types import Cluster
- import warnings
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(
- self.INSTANCE_ID,
- client,
- self.DISPLAY_NAME,
- enums.Instance.Type.PRODUCTION,
- self.LABELS,
- )
- instance_api, response = self._instance_api_response_for_create()
- instance_api.common_project_path.return_value = "projects/project"
- client._instance_admin_client = instance_api
- serve_nodes = 3
-
- with warnings.catch_warnings(record=True) as warned:
- result = instance.create(
- location_id=self.LOCATION_ID, serve_nodes=serve_nodes
- )
-
- cluster_pb = Cluster(
- location=instance_api.location_path(self.PROJECT, self.LOCATION_ID),
- serve_nodes=serve_nodes,
- default_storage_type=enums.StorageType.UNSPECIFIED,
- )
- instance_pb = Instance(
- display_name=self.DISPLAY_NAME,
- type_=enums.Instance.Type.PRODUCTION,
- labels=self.LABELS,
- )
- cluster_id = "{}-cluster".format(self.INSTANCE_ID)
- instance_api.create_instance.assert_called_once_with(
- request={
- "parent": instance_api.project_path(self.PROJECT),
- "instance_id": self.INSTANCE_ID,
- "instance": instance_pb,
- "clusters": {cluster_id: cluster_pb},
- }
- )
-
- self.assertEqual(len(warned), 1)
- self.assertIs(warned[0].category, DeprecationWarning)
-
- self.assertIs(result, response)
-
- def test_create_w_clusters(self):
- from google.cloud.bigtable import enums
- from google.cloud.bigtable.cluster import Cluster
- from google.cloud.bigtable_admin_v2.types import Cluster as cluster_pb
- from google.cloud.bigtable_admin_v2.types import Instance as instance_pb
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(
- self.INSTANCE_ID,
- client,
- self.DISPLAY_NAME,
- enums.Instance.Type.PRODUCTION,
- self.LABELS,
- )
- instance_api, response = self._instance_api_response_for_create()
- instance_api.common_project_path.return_value = "projects/project"
- client._instance_admin_client = instance_api
-
- # Perform the method and check the result.
- cluster_id_1 = "cluster-1"
- cluster_id_2 = "cluster-2"
- location_id_1 = "location-id-1"
- location_id_2 = "location-id-2"
- serve_nodes_1 = 3
- serve_nodes_2 = 5
- clusters = [
- Cluster(
- cluster_id_1,
- instance,
- location_id=location_id_1,
- serve_nodes=serve_nodes_1,
- ),
- Cluster(
- cluster_id_2,
- instance,
- location_id=location_id_2,
- serve_nodes=serve_nodes_2,
- ),
- ]
-
- result = instance.create(clusters=clusters)
-
- cluster_pb_1 = cluster_pb(
- location=instance_api.location_path(self.PROJECT, location_id_1),
+ response = operation.from_gapic(
+ response_pb,
+ mock.Mock(),
+ instance.Instance,
+ metadata_type=messages_v2_pb2.CreateInstanceMetadata,
+ )
+ project_path_template = "projects/{}"
+ location_path_template = "projects/{}/locations/{}"
+ api = _make_instance_admin_api()
+ api.create_instance.return_value = response
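+ # Swap the path helpers for plain str.format so the tests can build expected resource names.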
+ api.project_path = project_path_template.format
+ api.location_path = location_path_template.format
+ api.common_location_path = location_path_template.format
+ return api, response
+
+
+def test_instance_create():
+ from google.cloud.bigtable import enums
+ from google.cloud.bigtable_admin_v2.types import Instance
+ from google.cloud.bigtable_admin_v2.types import Cluster
+ import warnings
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(
+ INSTANCE_ID, client, DISPLAY_NAME, enums.Instance.Type.PRODUCTION, LABELS,
+ )
+ api, response = _instance_api_response_for_create()
+ client._instance_admin_client = api
+ api.common_project_path.return_value = "projects/project"
+ serve_nodes = 3
+
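+ # create() with location_id/serve_nodes emits a DeprecationWarning, asserted below.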
+ with warnings.catch_warnings(record=True) as warned:
+ result = instance.create(location_id=LOCATION_ID, serve_nodes=serve_nodes)
+
+ assert result is response
+
+ cluster_pb = Cluster(
+ location=api.location_path(PROJECT, LOCATION_ID),
+ serve_nodes=serve_nodes,
+ default_storage_type=enums.StorageType.UNSPECIFIED,
+ )
+ instance_pb = Instance(
+ display_name=DISPLAY_NAME, type_=enums.Instance.Type.PRODUCTION, labels=LABELS,
+ )
+ cluster_id = "{}-cluster".format(INSTANCE_ID)
+ api.create_instance.assert_called_once_with(
+ request={
+ "parent": api.project_path(PROJECT),
+ "instance_id": INSTANCE_ID,
+ "instance": instance_pb,
+ "clusters": {cluster_id: cluster_pb},
+ }
+ )
+
+ assert len(warned) == 1
+ assert warned[0].category is DeprecationWarning
+
+
+def test_instance_create_w_clusters():
+ from google.cloud.bigtable import enums
+ from google.cloud.bigtable.cluster import Cluster
+ from google.cloud.bigtable_admin_v2.types import Cluster as cluster_pb
+ from google.cloud.bigtable_admin_v2.types import Instance as instance_pb
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(
+ INSTANCE_ID, client, DISPLAY_NAME, enums.Instance.Type.PRODUCTION, LABELS,
+ )
+ api, response = _instance_api_response_for_create()
+ client._instance_admin_client = api
+ api.common_project_path.return_value = "projects/project"
+ cluster_id_1 = "cluster-1"
+ cluster_id_2 = "cluster-2"
+ location_id_1 = "location-id-1"
+ location_id_2 = "location-id-2"
+ serve_nodes_1 = 3
+ serve_nodes_2 = 5
+ clusters = [
+ Cluster(
+ cluster_id_1,
+ instance,
+ location_id=location_id_1,
serve_nodes=serve_nodes_1,
- default_storage_type=enums.StorageType.UNSPECIFIED,
- )
- cluster_pb_2 = cluster_pb(
- location=instance_api.location_path(self.PROJECT, location_id_2),
+ ),
+ Cluster(
+ cluster_id_2,
+ instance,
+ location_id=location_id_2,
serve_nodes=serve_nodes_2,
- default_storage_type=enums.StorageType.UNSPECIFIED,
- )
- instance_pb = instance_pb(
- display_name=self.DISPLAY_NAME,
- type_=enums.Instance.Type.PRODUCTION,
- labels=self.LABELS,
- )
- instance_api.create_instance.assert_called_once_with(
- request={
- "parent": instance_api.project_path(self.PROJECT),
- "instance_id": self.INSTANCE_ID,
- "instance": instance_pb,
- "clusters": {cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2},
- }
- )
-
- self.assertIs(result, response)
-
- def test_exists(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.api_core import exceptions
-
- api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
-
- # Create response_pb
- instance_name = client.instance_admin_client.instance_path(
- self.PROJECT, self.INSTANCE_ID
- )
- response_pb = data_v2_pb2.Instance(name=instance_name)
-
- # Patch the stub used by the API method.
- client._instance_admin_client = api
- instance_admin_stub = client._instance_admin_client
-
- instance_admin_stub.get_instance.side_effect = [
- response_pb,
- exceptions.NotFound("testing"),
- exceptions.BadRequest("testing"),
- ]
-
- # Perform the method and check the result.
- non_existing_instance_id = "instance-id-2"
- alt_instance_1 = self._make_one(self.INSTANCE_ID, client)
- alt_instance_2 = self._make_one(non_existing_instance_id, client)
- self.assertTrue(alt_instance_1.exists())
- self.assertFalse(alt_instance_2.exists())
-
- with self.assertRaises(exceptions.BadRequest):
- alt_instance_2.exists()
-
- def test_reload(self):
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable import enums
-
- api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(self.INSTANCE_ID, client)
-
- # Create response_pb
- DISPLAY_NAME = u"hey-hi-hello"
- instance_type = enums.Instance.Type.PRODUCTION
- response_pb = data_v2_pb2.Instance(
- display_name=DISPLAY_NAME, type_=instance_type, labels=self.LABELS
- )
-
- # Patch the stub used by the API method.
- client._instance_admin_client = api
- bigtable_instance_stub = client._instance_admin_client
- bigtable_instance_stub.get_instance.side_effect = [response_pb]
-
- # Create expected_result.
- expected_result = None # reload() has no return value.
-
- # Check Instance optional config values before.
- self.assertEqual(instance.display_name, self.INSTANCE_ID)
-
- # Perform the method and check the result.
- result = instance.reload()
- self.assertEqual(result, expected_result)
-
- # Check Instance optional config values before.
- self.assertEqual(instance.display_name, DISPLAY_NAME)
-
- def _instance_api_response_for_update(self):
- import datetime
- from google.api_core import operation
- from google.longrunning import operations_pb2
- from google.protobuf.any_pb2 import Any
- from google.cloud._helpers import _datetime_to_pb_timestamp
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_instance_admin as messages_v2_pb2,
- )
- from google.cloud.bigtable_admin_v2.types import instance
-
- NOW = datetime.datetime.utcnow()
- NOW_PB = _datetime_to_pb_timestamp(NOW)
- metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB)
- type_url = "type.googleapis.com/{}".format(
- messages_v2_pb2.UpdateInstanceMetadata._meta._pb.DESCRIPTOR.full_name
- )
- response_pb = operations_pb2.Operation(
- name=self.OP_NAME,
- metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
- )
- response = operation.from_gapic(
- response_pb,
- mock.Mock(),
- instance.Instance,
- metadata_type=messages_v2_pb2.UpdateInstanceMetadata,
- )
- instance_path_template = "projects/{project}/instances/{instance}"
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- instance_api.partial_update_instance.return_value = response
- instance_api.instance_path = instance_path_template.format
- return instance_api, response
-
- def test_update(self):
- from google.cloud.bigtable import enums
- from google.protobuf import field_mask_pb2
- from google.cloud.bigtable_admin_v2.types import Instance
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(
- self.INSTANCE_ID,
- client,
- display_name=self.DISPLAY_NAME,
- instance_type=enums.Instance.Type.DEVELOPMENT,
- labels=self.LABELS,
- )
- instance_api, response = self._instance_api_response_for_update()
- client._instance_admin_client = instance_api
-
- result = instance.update()
-
- instance_pb = Instance(
- name=instance.name,
- display_name=instance.display_name,
- type_=instance.type_,
- labels=instance.labels,
- )
- update_mask_pb = field_mask_pb2.FieldMask(
- paths=["display_name", "type", "labels"]
- )
-
- instance_api.partial_update_instance.assert_called_once_with(
- request={"instance": instance_pb, "update_mask": update_mask_pb}
- )
-
- self.assertIs(result, response)
-
- def test_update_empty(self):
- from google.protobuf import field_mask_pb2
- from google.cloud.bigtable_admin_v2.types import Instance
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(None, client)
- instance_api, response = self._instance_api_response_for_update()
- client._instance_admin_client = instance_api
-
- result = instance.update()
-
- instance_pb = Instance(
- name=instance.name,
- display_name=instance.display_name,
- type_=instance.type_,
- labels=instance.labels,
- )
- update_mask_pb = field_mask_pb2.FieldMask()
-
- instance_api.partial_update_instance.assert_called_once_with(
- request={"instance": instance_pb, "update_mask": update_mask_pb}
- )
-
- self.assertIs(result, response)
-
- def test_delete(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(self.INSTANCE_ID, client)
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- instance_api.delete_instance.return_value = None
- client._instance_admin_client = instance_api
-
- result = instance.delete()
-
- instance_api.delete_instance.assert_called_once_with(
- request={"name": instance.name}
- )
-
- self.assertIsNone(result)
-
- def test_get_iam_policy(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.iam.v1 import policy_pb2
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(self.INSTANCE_ID, client)
-
- version = 1
- etag = b"etag_v1"
- members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
- bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
- iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
-
- # Patch the stub used by the API method.
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- client._instance_admin_client = instance_api
- instance_api.get_iam_policy.return_value = iam_policy
-
- # Perform the method and check the result.
- result = instance.get_iam_policy()
-
- instance_api.get_iam_policy.assert_called_once_with(
- request={"resource": instance.name}
- )
- self.assertEqual(result.version, version)
- self.assertEqual(result.etag, etag)
- admins = result.bigtable_admins
- self.assertEqual(len(admins), len(members))
- for found, expected in zip(sorted(admins), sorted(members)):
- self.assertEqual(found, expected)
-
- def test_get_iam_policy_w_requested_policy_version(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.iam.v1 import policy_pb2, options_pb2
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(self.INSTANCE_ID, client)
-
- version = 1
- etag = b"etag_v1"
- members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
- bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
- iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
-
- # Patch the stub used by the API method.
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- client._instance_admin_client = instance_api
- instance_api.get_iam_policy.return_value = iam_policy
-
- # Perform the method and check the result.
- result = instance.get_iam_policy(requested_policy_version=3)
-
- instance_api.get_iam_policy.assert_called_once_with(
- request={
- "resource": instance.name,
- "options_": options_pb2.GetPolicyOptions(requested_policy_version=3),
- }
- )
- self.assertEqual(result.version, version)
- self.assertEqual(result.etag, etag)
- admins = result.bigtable_admins
- self.assertEqual(len(admins), len(members))
- for found, expected in zip(sorted(admins), sorted(members)):
- self.assertEqual(found, expected)
-
- def test_set_iam_policy(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.iam.v1 import policy_pb2
- from google.cloud.bigtable.policy import Policy
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(self.INSTANCE_ID, client)
-
- version = 1
- etag = b"etag_v1"
- members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
- bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
- iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
-
- # Patch the stub used by the API method.
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- instance_api.set_iam_policy.return_value = iam_policy_pb
- client._instance_admin_client = instance_api
-
- # Perform the method and check the result.
- iam_policy = Policy(etag=etag, version=version)
- iam_policy[BIGTABLE_ADMIN_ROLE] = [
- Policy.user("user1@test.com"),
- Policy.service_account("service_acc1@test.com"),
- ]
-
- result = instance.set_iam_policy(iam_policy)
-
- instance_api.set_iam_policy.assert_called_once_with(
- request={"resource": instance.name, "policy": iam_policy_pb}
- )
- self.assertEqual(result.version, version)
- self.assertEqual(result.etag, etag)
- admins = result.bigtable_admins
- self.assertEqual(len(admins), len(members))
- for found, expected in zip(sorted(admins), sorted(members)):
- self.assertEqual(found, expected)
-
- def test_test_iam_permissions(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.iam.v1 import iam_policy_pb2
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(self.INSTANCE_ID, client)
-
- permissions = ["bigtable.tables.create", "bigtable.clusters.create"]
-
- response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)
-
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- instance_api.test_iam_permissions.return_value = response
- client._instance_admin_client = instance_api
-
- result = instance.test_iam_permissions(permissions)
-
- self.assertEqual(result, permissions)
- instance_api.test_iam_permissions.assert_called_once_with(
- request={"resource": instance.name, "permissions": permissions}
- )
-
- def test_cluster_factory(self):
- from google.cloud.bigtable import enums
-
- CLUSTER_ID = "{}-cluster".format(self.INSTANCE_ID)
- LOCATION_ID = "us-central1-c"
- SERVE_NODES = 3
- STORAGE_TYPE = enums.StorageType.HDD
-
- instance = self._make_one(self.INSTANCE_ID, None)
-
- cluster = instance.cluster(
- CLUSTER_ID,
- location_id=LOCATION_ID,
- serve_nodes=SERVE_NODES,
- default_storage_type=STORAGE_TYPE,
- )
- self.assertIsInstance(cluster, Cluster)
- self.assertEqual(cluster.cluster_id, CLUSTER_ID)
- self.assertEqual(cluster.location_id, LOCATION_ID)
- self.assertIsNone(cluster._state)
- self.assertEqual(cluster.serve_nodes, SERVE_NODES)
- self.assertEqual(cluster.default_storage_type, STORAGE_TYPE)
-
- def test_list_clusters(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_instance_admin as messages_v2_pb2,
- )
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable.instance import Instance
- from google.cloud.bigtable.instance import Cluster
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = Instance(self.INSTANCE_ID, client)
-
- failed_location = "FAILED"
- cluster_id1 = "cluster-id1"
- cluster_id2 = "cluster-id2"
- cluster_path_template = "projects/{}/instances/{}/clusters/{}"
- cluster_name1 = cluster_path_template.format(
- self.PROJECT, self.INSTANCE_ID, cluster_id1
- )
- cluster_name2 = cluster_path_template.format(
- self.PROJECT, self.INSTANCE_ID, cluster_id2
- )
-
- # Create response_pb
- response_pb = messages_v2_pb2.ListClustersResponse(
- failed_locations=[failed_location],
- clusters=[
- data_v2_pb2.Cluster(name=cluster_name1),
- data_v2_pb2.Cluster(name=cluster_name2),
- ],
- )
-
- # Patch the stub used by the API method.
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- instance_api.list_clusters.side_effect = [response_pb]
- instance_api.cluster_path = cluster_path_template.format
- client._instance_admin_client = instance_api
-
- # Perform the method and check the result.
- clusters, failed_locations = instance.list_clusters()
-
- cluster_1, cluster_2 = clusters
-
- self.assertIsInstance(cluster_1, Cluster)
- self.assertEqual(cluster_1.name, cluster_name1)
-
- self.assertIsInstance(cluster_2, Cluster)
- self.assertEqual(cluster_2.name, cluster_name2)
-
- self.assertEqual(failed_locations, [failed_location])
-
- def test_table_factory(self):
- from google.cloud.bigtable.table import Table
-
- app_profile_id = "appProfileId1262094415"
- instance = self._make_one(self.INSTANCE_ID, None)
-
- table = instance.table(self.TABLE_ID, app_profile_id=app_profile_id)
- self.assertIsInstance(table, Table)
- self.assertEqual(table.table_id, self.TABLE_ID)
- self.assertEqual(table._instance, instance)
- self.assertEqual(table._app_profile_id, app_profile_id)
-
- def _list_tables_helper(self, table_name=None):
- from google.cloud.bigtable_admin_v2.types import table as table_data_v2_pb2
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_table_admin as table_messages_v1_pb2,
- )
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
- )
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
-
- table_api = mock.create_autospec(BigtableTableAdminClient)
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(self.INSTANCE_ID, client)
-
- instance_api.instance_path.return_value = instance.name
- # Create response_pb
- if table_name is None:
- table_name = self.TABLE_NAME
-
- response_pb = table_messages_v1_pb2.ListTablesResponse(
- tables=[table_data_v2_pb2.Table(name=table_name)]
- )
-
- # Patch the stub used by the API method.
- client._table_admin_client = table_api
- client._instance_admin_client = instance_api
- bigtable_table_stub = client._table_admin_client
- bigtable_table_stub.list_tables.side_effect = [response_pb]
-
- # Create expected_result.
- expected_table = instance.table(self.TABLE_ID)
- expected_result = [expected_table]
-
- # Perform the method and check the result.
- result = instance.list_tables()
-
- self.assertEqual(result, expected_result)
-
- def test_list_tables(self):
- self._list_tables_helper()
-
- def test_list_tables_failure_bad_split(self):
- with self.assertRaises(ValueError):
- self._list_tables_helper(table_name="wrong-format")
-
- def test_list_tables_failure_name_bad_before(self):
- BAD_TABLE_NAME = (
- "nonempty-section-before"
- + "projects/"
- + self.PROJECT
- + "/instances/"
- + self.INSTANCE_ID
- + "/tables/"
- + self.TABLE_ID
- )
- with self.assertRaises(ValueError):
- self._list_tables_helper(table_name=BAD_TABLE_NAME)
-
- def test_app_profile_factory(self):
- from google.cloud.bigtable.enums import RoutingPolicyType
-
- APP_PROFILE_ID_1 = "app-profile-id-1"
- ANY = RoutingPolicyType.ANY
- DESCRIPTION_1 = "routing policy any"
- APP_PROFILE_ID_2 = "app-profile-id-2"
- SINGLE = RoutingPolicyType.SINGLE
- DESCRIPTION_2 = "routing policy single"
- ALLOW_WRITES = True
- CLUSTER_ID = "cluster-id"
-
- instance = self._make_one(self.INSTANCE_ID, None)
-
- app_profile1 = instance.app_profile(
- APP_PROFILE_ID_1, routing_policy_type=ANY, description=DESCRIPTION_1
- )
-
- app_profile2 = instance.app_profile(
- APP_PROFILE_ID_2,
- routing_policy_type=SINGLE,
- description=DESCRIPTION_2,
- cluster_id=CLUSTER_ID,
- allow_transactional_writes=ALLOW_WRITES,
- )
- self.assertEqual(app_profile1.app_profile_id, APP_PROFILE_ID_1)
- self.assertIs(app_profile1._instance, instance)
- self.assertEqual(app_profile1.routing_policy_type, ANY)
- self.assertEqual(app_profile1.description, DESCRIPTION_1)
- self.assertEqual(app_profile2.app_profile_id, APP_PROFILE_ID_2)
- self.assertIs(app_profile2._instance, instance)
- self.assertEqual(app_profile2.routing_policy_type, SINGLE)
- self.assertEqual(app_profile2.description, DESCRIPTION_2)
- self.assertEqual(app_profile2.cluster_id, CLUSTER_ID)
- self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES)
-
- def test_list_app_profiles(self):
- from google.api_core.page_iterator import Iterator
- from google.api_core.page_iterator import Page
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
- from google.cloud.bigtable.app_profile import AppProfile
-
- class _Iterator(Iterator):
- def __init__(self, pages):
- super(_Iterator, self).__init__(client=None)
- self._pages = pages
-
- def _next_page(self):
- if self._pages:
- page, self._pages = self._pages[0], self._pages[1:]
- return Page(self, page, self.item_to_value)
-
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT, credentials=credentials, admin=True
- )
- instance = self._make_one(self.INSTANCE_ID, client)
-
- # Setup Expected Response
- app_profile_path_template = "projects/{}/instances/{}/appProfiles/{}"
- app_profile_id1 = "app-profile-id1"
- app_profile_id2 = "app-profile-id2"
- app_profile_name1 = app_profile_path_template.format(
- self.PROJECT, self.INSTANCE_ID, app_profile_id1
- )
- app_profile_name2 = app_profile_path_template.format(
- self.PROJECT, self.INSTANCE_ID, app_profile_id2
- )
- routing_policy = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny()
-
- app_profiles = [
- data_v2_pb2.AppProfile(
- name=app_profile_name1, multi_cluster_routing_use_any=routing_policy
- ),
- data_v2_pb2.AppProfile(
- name=app_profile_name2, multi_cluster_routing_use_any=routing_policy
- ),
- ]
- iterator = _Iterator(pages=[app_profiles])
-
- # Patch the stub used by the API method.
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- client._instance_admin_client = instance_api
- instance_api.app_profile_path = app_profile_path_template.format
- instance_api.list_app_profiles.return_value = iterator
-
- # Perform the method and check the result.
- app_profiles = instance.list_app_profiles()
-
- app_profile_1, app_profile_2 = app_profiles
-
- self.assertIsInstance(app_profile_1, AppProfile)
- self.assertEqual(app_profile_1.name, app_profile_name1)
-
- self.assertIsInstance(app_profile_2, AppProfile)
- self.assertEqual(app_profile_2.name, app_profile_name2)
+ ),
+ ]
+
+ result = instance.create(clusters=clusters)
+
+ assert result is response
+
+ cluster_pb_1 = cluster_pb(
+ location=api.location_path(PROJECT, location_id_1),
+ serve_nodes=serve_nodes_1,
+ default_storage_type=enums.StorageType.UNSPECIFIED,
+ )
+ cluster_pb_2 = cluster_pb(
+ location=api.location_path(PROJECT, location_id_2),
+ serve_nodes=serve_nodes_2,
+ default_storage_type=enums.StorageType.UNSPECIFIED,
+ )
+ instance_pb = instance_pb(
+ display_name=DISPLAY_NAME, type_=enums.Instance.Type.PRODUCTION, labels=LABELS,
+ )
+ api.create_instance.assert_called_once_with(
+ request={
+ "parent": api.project_path(PROJECT),
+ "instance_id": INSTANCE_ID,
+ "instance": instance_pb,
+ "clusters": {cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2},
+ }
+ )
+
+
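+# exists() should return True on a hit, False on NotFound, and re-raise other errors.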
+def test_instance_exists_hit():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ response_pb = data_v2_pb2.Instance(name=INSTANCE_NAME)
+ api = client._instance_admin_client = _make_instance_admin_api()
+ api.instance_path.return_value = INSTANCE_NAME
+ api.get_instance.return_value = response_pb
+ instance = _make_instance(INSTANCE_ID, client)
+
+ assert instance.exists()
+
+ api.get_instance.assert_called_once_with(request={"name": INSTANCE_NAME})
+
+
+def test_instance_exists_miss():
+ from google.api_core import exceptions
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+
+ api = client._instance_admin_client = _make_instance_admin_api()
+ api.instance_path.return_value = INSTANCE_NAME
+ api.get_instance.side_effect = exceptions.NotFound("testing")
+
+ non_existing_instance_id = "instance-id-2"
+ instance = _make_instance(non_existing_instance_id, client)
+
+ assert not instance.exists()
+
+ api.get_instance.assert_called_once_with(request={"name": INSTANCE_NAME})
+
+
+def test_instance_exists_w_error():
+ from google.api_core import exceptions
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+
+ api = client._instance_admin_client = _make_instance_admin_api()
+ api.instance_path.return_value = INSTANCE_NAME
+ api.get_instance.side_effect = exceptions.BadRequest("testing")
+ instance = _make_instance(INSTANCE_ID, client)
+
+ with pytest.raises(exceptions.BadRequest):
+ instance.exists()
+
+ api.get_instance.assert_called_once_with(request={"name": INSTANCE_NAME})
+
+
+def test_instance_reload():
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable import enums
+
+ DISPLAY_NAME = "hey-hi-hello"
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(INSTANCE_ID, client)
+ response_pb = data_v2_pb2.Instance(
+ display_name=DISPLAY_NAME, type_=enums.Instance.Type.PRODUCTION, labels=LABELS
+ )
+ api = client._instance_admin_client = _make_instance_admin_api()
+ api.get_instance.side_effect = [response_pb]
+ assert instance.display_name == INSTANCE_ID
+
+ result = instance.reload()
+
+ assert result is None
+ assert instance.display_name == DISPLAY_NAME
+
+
+def _instance_api_response_for_update():
+ import datetime
+ from google.api_core import operation
+ from google.longrunning import operations_pb2
+ from google.protobuf.any_pb2 import Any
+ from google.cloud._helpers import _datetime_to_pb_timestamp
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_instance_admin as messages_v2_pb2,
+ )
+ from google.cloud.bigtable_admin_v2.types import instance
+
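+ # Same pattern as _instance_api_response_for_create, but for partial_update_instance.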
+ NOW = datetime.datetime.utcnow()
+ NOW_PB = _datetime_to_pb_timestamp(NOW)
+ metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB)
+ type_url = "type.googleapis.com/{}".format(
+ messages_v2_pb2.UpdateInstanceMetadata._meta._pb.DESCRIPTOR.full_name
+ )
+ response_pb = operations_pb2.Operation(
+ name=OP_NAME,
+ metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
+ )
+ response = operation.from_gapic(
+ response_pb,
+ mock.Mock(),
+ instance.Instance,
+ metadata_type=messages_v2_pb2.UpdateInstanceMetadata,
+ )
+ instance_path_template = "projects/{project}/instances/{instance}"
+ api = _make_instance_admin_api()
+ api.partial_update_instance.return_value = response
+ api.instance_path = instance_path_template.format
+ return api, response
+
+
+def test_instance_update():
+ from google.cloud.bigtable import enums
+ from google.protobuf import field_mask_pb2
+ from google.cloud.bigtable_admin_v2.types import Instance
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(
+ INSTANCE_ID,
+ client,
+ display_name=DISPLAY_NAME,
+ instance_type=enums.Instance.Type.DEVELOPMENT,
+ labels=LABELS,
+ )
+ api, response = _instance_api_response_for_update()
+ client._instance_admin_client = api
+
+ result = instance.update()
+
+ assert result is response
+
+ instance_pb = Instance(
+ name=instance.name,
+ display_name=instance.display_name,
+ type_=instance.type_,
+ labels=instance.labels,
+ )
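+ # The expected field mask lists exactly the fields set on the instance above.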
+ update_mask_pb = field_mask_pb2.FieldMask(paths=["display_name", "type", "labels"])
+
+ api.partial_update_instance.assert_called_once_with(
+ request={"instance": instance_pb, "update_mask": update_mask_pb}
+ )
+
+
+def test_instance_update_empty():
+ from google.protobuf import field_mask_pb2
+ from google.cloud.bigtable_admin_v2.types import Instance
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(None, client)
+ api, response = _instance_api_response_for_update()
+ client._instance_admin_client = api
+
+ result = instance.update()
+
+ assert result is response
+
+ instance_pb = Instance(
+ name=instance.name,
+ display_name=instance.display_name,
+ type_=instance.type_,
+ labels=instance.labels,
+ )
+ update_mask_pb = field_mask_pb2.FieldMask()
+
+ api.partial_update_instance.assert_called_once_with(
+ request={"instance": instance_pb, "update_mask": update_mask_pb}
+ )
+
+
+def test_instance_delete():
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(INSTANCE_ID, client)
+ api = client._instance_admin_client = _make_instance_admin_api()
+ api.delete_instance.return_value = None
+
+ result = instance.delete()
+
+ assert result is None
+
+ api.delete_instance.assert_called_once_with(request={"name": instance.name})
+
+
+def test_instance_get_iam_policy():
+ from google.iam.v1 import policy_pb2
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(INSTANCE_ID, client)
+
+ version = 1
+ etag = b"etag_v1"
+ members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
+ bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
+ iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
+ api = client._instance_admin_client = _make_instance_admin_api()
+ api.get_iam_policy.return_value = iam_policy
+
+ result = instance.get_iam_policy()
+
+ assert result.version == version
+ assert result.etag == etag
+ admins = result.bigtable_admins
+ assert len(admins) == len(members)
+
+ for found, expected in zip(sorted(admins), sorted(members)):
+ assert found == expected
+ api.get_iam_policy.assert_called_once_with(request={"resource": instance.name})
+
+
+def test_instance_get_iam_policy_w_requested_policy_version():
+ from google.iam.v1 import policy_pb2, options_pb2
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(INSTANCE_ID, client)
+
+ version = 1
+ etag = b"etag_v1"
+ members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
+ bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
+ iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
+
+ api = client._instance_admin_client = _make_instance_admin_api()
+ api.get_iam_policy.return_value = iam_policy
+
+ result = instance.get_iam_policy(requested_policy_version=3)
+
+ assert result.version == version
+ assert result.etag == etag
+ admins = result.bigtable_admins
+ assert len(admins) == len(members)
+ for found, expected in zip(sorted(admins), sorted(members)):
+ assert found == expected
+
+ api.get_iam_policy.assert_called_once_with(
+ request={
+ "resource": instance.name,
+ "options_": options_pb2.GetPolicyOptions(requested_policy_version=3),
+ }
+ )
+
+
+def test_instance_set_iam_policy():
+ from google.iam.v1 import policy_pb2
+ from google.cloud.bigtable.policy import Policy
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(INSTANCE_ID, client)
+
+ version = 1
+ etag = b"etag_v1"
+ members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
+ bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
+ iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
+
+ api = client._instance_admin_client = _make_instance_admin_api()
+ api.set_iam_policy.return_value = iam_policy_pb
+ iam_policy = Policy(etag=etag, version=version)
+ iam_policy[BIGTABLE_ADMIN_ROLE] = [
+ Policy.user("user1@test.com"),
+ Policy.service_account("service_acc1@test.com"),
+ ]
+
+ result = instance.set_iam_policy(iam_policy)
+
+ api.set_iam_policy.assert_called_once_with(
+ request={"resource": instance.name, "policy": iam_policy_pb}
+ )
+ assert result.version == version
+ assert result.etag == etag
+ admins = result.bigtable_admins
+ assert len(admins) == len(members)
+ for found, expected in zip(sorted(admins), sorted(members)):
+ assert found == expected
+
+
+def test_instance_test_iam_permissions():
+ from google.iam.v1 import iam_policy_pb2
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(INSTANCE_ID, client)
+
+ permissions = ["bigtable.tables.create", "bigtable.clusters.create"]
+
+ response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)
+ api = client._instance_admin_client = _make_instance_admin_api()
+ api.test_iam_permissions.return_value = response
+
+ result = instance.test_iam_permissions(permissions)
+
+ assert result == permissions
+ api.test_iam_permissions.assert_called_once_with(
+ request={"resource": instance.name, "permissions": permissions}
+ )
+
+
+def test_instance_cluster_factory():
+ from google.cloud.bigtable import enums
+
+ CLUSTER_ID = "{}-cluster".format(INSTANCE_ID)
+ LOCATION_ID = "us-central1-c"
+ SERVE_NODES = 3
+ STORAGE_TYPE = enums.StorageType.HDD
+
+ instance = _make_instance(INSTANCE_ID, None)
+
+ cluster = instance.cluster(
+ CLUSTER_ID,
+ location_id=LOCATION_ID,
+ serve_nodes=SERVE_NODES,
+ default_storage_type=STORAGE_TYPE,
+ )
+ assert isinstance(cluster, Cluster)
+ assert cluster.cluster_id == CLUSTER_ID
+ assert cluster.location_id == LOCATION_ID
+ assert cluster._state is None
+ assert cluster.serve_nodes == SERVE_NODES
+ assert cluster.default_storage_type == STORAGE_TYPE
+
+
+def test_instance_list_clusters():
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_instance_admin as messages_v2_pb2,
+ )
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.instance import Instance
+ from google.cloud.bigtable.instance import Cluster
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = Instance(INSTANCE_ID, client)
+
+ failed_location = "FAILED"
+ cluster_id1 = "cluster-id1"
+ cluster_id2 = "cluster-id2"
+ cluster_path_template = "projects/{}/instances/{}/clusters/{}"
+ cluster_name1 = cluster_path_template.format(PROJECT, INSTANCE_ID, cluster_id1)
+ cluster_name2 = cluster_path_template.format(PROJECT, INSTANCE_ID, cluster_id2)
+ response_pb = messages_v2_pb2.ListClustersResponse(
+ failed_locations=[failed_location],
+ clusters=[
+ data_v2_pb2.Cluster(name=cluster_name1),
+ data_v2_pb2.Cluster(name=cluster_name2),
+ ],
+ )
+ api = client._instance_admin_client = _make_instance_admin_api()
+ api.list_clusters.side_effect = [response_pb]
+ api.cluster_path = cluster_path_template.format
+
+ # Perform the method and check the result.
+ clusters, failed_locations = instance.list_clusters()
+
+ cluster_1, cluster_2 = clusters
+
+ assert isinstance(cluster_1, Cluster)
+ assert cluster_1.name == cluster_name1
+
+ assert isinstance(cluster_2, Cluster)
+ assert cluster_2.name == cluster_name2
+
+ assert failed_locations == [failed_location]
+
+
+def test_instance_table_factory():
+ from google.cloud.bigtable.table import Table
+
+ app_profile_id = "appProfileId1262094415"
+ instance = _make_instance(INSTANCE_ID, None)
+
+ table = instance.table(TABLE_ID, app_profile_id=app_profile_id)
+ assert isinstance(table, Table)
+ assert table.table_id == TABLE_ID
+ assert table._instance == instance
+ assert table._app_profile_id == app_profile_id
+
+
+def _list_tables_helper(table_name=None):
+ from google.cloud.bigtable_admin_v2.types import table as table_data_v2_pb2
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_table_admin as table_messages_v1_pb2,
+ )
+ from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ BigtableTableAdminClient,
+ )
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(INSTANCE_ID, client)
+
+ instance_api = client._instance_admin_client = _make_instance_admin_api()
+ instance_api.instance_path.return_value = "projects/project/instances/instance-id"
+ table_api = client._table_admin_client = mock.create_autospec(
+ BigtableTableAdminClient
+ )
+ if table_name is None:
+ table_name = TABLE_NAME
+
+ response_pb = table_messages_v1_pb2.ListTablesResponse(
+ tables=[table_data_v2_pb2.Table(name=table_name)]
+ )
+
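+ # Patch the stub used by the API method.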
+ table_api.list_tables.side_effect = [response_pb]
+
+ result = instance.list_tables()
+
+ expected_table = instance.table(TABLE_ID)
+ assert result == [expected_table]
+
+
+def test_instance_list_tables():
+ _list_tables_helper()
+
+
+def test_instance_list_tables_failure_bad_split():
+ with pytest.raises(ValueError):
+ _list_tables_helper(table_name="wrong-format")
+
+
+def test_instance_list_tables_failure_name_bad_before():
+ BAD_TABLE_NAME = (
+ "nonempty-section-before"
+ + "projects/"
+ + PROJECT
+ + "/instances/"
+ + INSTANCE_ID
+ + "/tables/"
+ + TABLE_ID
+ )
+ with pytest.raises(ValueError):
+ _list_tables_helper(table_name=BAD_TABLE_NAME)
+
+
+def test_instance_app_profile_factory():
+ from google.cloud.bigtable.enums import RoutingPolicyType
+
+ instance = _make_instance(INSTANCE_ID, None)
+
+ app_profile1 = instance.app_profile(
+ APP_PROFILE_ID_1,
+ routing_policy_type=RoutingPolicyType.ANY,
+ description=DESCRIPTION_1,
+ )
+
+ app_profile2 = instance.app_profile(
+ APP_PROFILE_ID_2,
+ routing_policy_type=RoutingPolicyType.SINGLE,
+ description=DESCRIPTION_2,
+ cluster_id=CLUSTER_ID,
+ allow_transactional_writes=ALLOW_WRITES,
+ )
+ assert app_profile1.app_profile_id == APP_PROFILE_ID_1
+ assert app_profile1._instance is instance
+ assert app_profile1.routing_policy_type == RoutingPolicyType.ANY
+ assert app_profile1.description == DESCRIPTION_1
+ assert app_profile2.app_profile_id == APP_PROFILE_ID_2
+ assert app_profile2._instance is instance
+ assert app_profile2.routing_policy_type == RoutingPolicyType.SINGLE
+ assert app_profile2.description == DESCRIPTION_2
+ assert app_profile2.cluster_id == CLUSTER_ID
+ assert app_profile2.allow_transactional_writes == ALLOW_WRITES
+
+
+def test_instance_list_app_profiles():
+ from google.api_core.page_iterator import Iterator
+ from google.api_core.page_iterator import Page
+ from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
+ from google.cloud.bigtable.app_profile import AppProfile
+
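+ # Minimal paged iterator, standing in for the iterator the API method returns.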
+ class _Iterator(Iterator):
+ def __init__(self, pages):
+ super(_Iterator, self).__init__(client=None)
+ self._pages = pages
+
+ def _next_page(self):
+ if self._pages:
+ page, self._pages = self._pages[0], self._pages[1:]
+ return Page(self, page, self.item_to_value)
+
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT, credentials=credentials, admin=True)
+ instance = _make_instance(INSTANCE_ID, client)
+
+ # Set up the expected response.
+ app_profile_path_template = "projects/{}/instances/{}/appProfiles/{}"
+ app_profile_id1 = "app-profile-id1"
+ app_profile_id2 = "app-profile-id2"
+ app_profile_name1 = app_profile_path_template.format(
+ PROJECT, INSTANCE_ID, app_profile_id1
+ )
+ app_profile_name2 = app_profile_path_template.format(
+ PROJECT, INSTANCE_ID, app_profile_id2
+ )
+ routing_policy = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny()
+
+ app_profiles = [
+ data_v2_pb2.AppProfile(
+ name=app_profile_name1, multi_cluster_routing_use_any=routing_policy
+ ),
+ data_v2_pb2.AppProfile(
+ name=app_profile_name2, multi_cluster_routing_use_any=routing_policy
+ ),
+ ]
+ iterator = _Iterator(pages=[app_profiles])
+
+ # Patch the stub used by the API method.
+ api = _make_instance_admin_api()
+ client._instance_admin_client = api
+ api.app_profile_path = app_profile_path_template.format
+ api.list_app_profiles.return_value = iterator
+
+ # Perform the method and check the result.
+ app_profiles = instance.list_app_profiles()
+
+ app_profile_1, app_profile_2 = app_profiles
+
+ assert isinstance(app_profile_1, AppProfile)
+ assert app_profile_1.name == app_profile_name1
+
+ assert isinstance(app_profile_2, AppProfile)
+ assert app_profile_2.name == app_profile_name2
diff --git a/tests/unit/test_policy.py b/tests/unit/test_policy.py
index 63f9ba03f..1b1adbed5 100644
--- a/tests/unit/test_policy.py
+++ b/tests/unit/test_policy.py
@@ -12,263 +12,267 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import unittest
-
-
-class TestPolicy(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.policy import Policy
-
- return Policy
-
- def _make_one(self, *args, **kw):
- return self._get_target_class()(*args, **kw)
-
- def test_ctor_defaults(self):
- empty = frozenset()
- policy = self._make_one()
- self.assertIsNone(policy.etag)
- self.assertIsNone(policy.version)
- self.assertEqual(policy.bigtable_admins, empty)
- self.assertEqual(policy.bigtable_readers, empty)
- self.assertEqual(policy.bigtable_users, empty)
- self.assertEqual(policy.bigtable_viewers, empty)
- self.assertEqual(len(policy), 0)
- self.assertEqual(dict(policy), {})
-
- def test_ctor_explicit(self):
- VERSION = 1
- ETAG = b"ETAG"
- empty = frozenset()
- policy = self._make_one(ETAG, VERSION)
- self.assertEqual(policy.etag, ETAG)
- self.assertEqual(policy.version, VERSION)
- self.assertEqual(policy.bigtable_admins, empty)
- self.assertEqual(policy.bigtable_readers, empty)
- self.assertEqual(policy.bigtable_users, empty)
- self.assertEqual(policy.bigtable_viewers, empty)
- self.assertEqual(len(policy), 0)
- self.assertEqual(dict(policy), {})
-
- def test_bigtable_admins_getter(self):
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
-
- MEMBER = "user:phred@example.com"
- expected = frozenset([MEMBER])
- policy = self._make_one()
- policy[BIGTABLE_ADMIN_ROLE] = [MEMBER]
- self.assertEqual(policy.bigtable_admins, expected)
-
- def test_bigtable_readers_getter(self):
- from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE
-
- MEMBER = "user:phred@example.com"
- expected = frozenset([MEMBER])
- policy = self._make_one()
- policy[BIGTABLE_READER_ROLE] = [MEMBER]
- self.assertEqual(policy.bigtable_readers, expected)
-
- def test_bigtable_users_getter(self):
- from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE
-
- MEMBER = "user:phred@example.com"
- expected = frozenset([MEMBER])
- policy = self._make_one()
- policy[BIGTABLE_USER_ROLE] = [MEMBER]
- self.assertEqual(policy.bigtable_users, expected)
-
- def test_bigtable_viewers_getter(self):
- from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE
-
- MEMBER = "user:phred@example.com"
- expected = frozenset([MEMBER])
- policy = self._make_one()
- policy[BIGTABLE_VIEWER_ROLE] = [MEMBER]
- self.assertEqual(policy.bigtable_viewers, expected)
-
- def test_from_pb_empty(self):
- from google.iam.v1 import policy_pb2
-
- empty = frozenset()
- message = policy_pb2.Policy()
- klass = self._get_target_class()
- policy = klass.from_pb(message)
- self.assertEqual(policy.etag, b"")
- self.assertEqual(policy.version, 0)
- self.assertEqual(policy.bigtable_admins, empty)
- self.assertEqual(policy.bigtable_readers, empty)
- self.assertEqual(policy.bigtable_users, empty)
- self.assertEqual(policy.bigtable_viewers, empty)
- self.assertEqual(len(policy), 0)
- self.assertEqual(dict(policy), {})
-
- def test_from_pb_non_empty(self):
- from google.iam.v1 import policy_pb2
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
-
- ETAG = b"ETAG"
- VERSION = 1
- members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
- empty = frozenset()
- message = policy_pb2.Policy(
- etag=ETAG,
- version=VERSION,
- bindings=[{"role": BIGTABLE_ADMIN_ROLE, "members": members}],
- )
- klass = self._get_target_class()
- policy = klass.from_pb(message)
- self.assertEqual(policy.etag, ETAG)
- self.assertEqual(policy.version, VERSION)
- self.assertEqual(policy.bigtable_admins, set(members))
- self.assertEqual(policy.bigtable_readers, empty)
- self.assertEqual(policy.bigtable_users, empty)
- self.assertEqual(policy.bigtable_viewers, empty)
- self.assertEqual(len(policy), 1)
- self.assertEqual(dict(policy), {BIGTABLE_ADMIN_ROLE: set(members)})
-
- def test_from_pb_with_condition(self):
- import pytest
- from google.iam.v1 import policy_pb2
- from google.api_core.iam import InvalidOperationException, _DICT_ACCESS_MSG
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
-
- ETAG = b"ETAG"
- VERSION = 3
- members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
- BINDINGS = [
- {
- "role": BIGTABLE_ADMIN_ROLE,
- "members": members,
- "condition": {
- "title": "request_time",
- "description": "Requests made before 2021-01-01T00:00:00Z",
- "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")',
- },
- }
- ]
- message = policy_pb2.Policy(etag=ETAG, version=VERSION, bindings=BINDINGS,)
- klass = self._get_target_class()
- policy = klass.from_pb(message)
- self.assertEqual(policy.etag, ETAG)
- self.assertEqual(policy.version, VERSION)
- self.assertEqual(policy.bindings[0]["role"], BIGTABLE_ADMIN_ROLE)
- self.assertEqual(policy.bindings[0]["members"], set(members))
- self.assertEqual(policy.bindings[0]["condition"], BINDINGS[0]["condition"])
- with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
- policy.bigtable_admins
- with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
- policy.bigtable_readers
- with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
- policy.bigtable_users
- with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
- policy.bigtable_viewers
- with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
- len(policy)
-
- def test_to_pb_empty(self):
- from google.iam.v1 import policy_pb2
-
- policy = self._make_one()
- expected = policy_pb2.Policy()
-
- self.assertEqual(policy.to_pb(), expected)
-
- def test_to_pb_explicit(self):
- from google.iam.v1 import policy_pb2
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
-
- VERSION = 1
- ETAG = b"ETAG"
- members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
- policy = self._make_one(ETAG, VERSION)
- policy[BIGTABLE_ADMIN_ROLE] = members
- expected = policy_pb2.Policy(
- etag=ETAG,
- version=VERSION,
- bindings=[
- policy_pb2.Binding(role=BIGTABLE_ADMIN_ROLE, members=sorted(members))
- ],
- )
-
- self.assertEqual(policy.to_pb(), expected)
-
- def test_to_pb_with_condition(self):
- from google.iam.v1 import policy_pb2
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
-
- VERSION = 3
- ETAG = b"ETAG"
- members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
- condition = {
- "title": "request_time",
- "description": "Requests made before 2021-01-01T00:00:00Z",
- "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")',
+
+def _make_policy(*args, **kw):
+ from google.cloud.bigtable.policy import Policy
+
+ return Policy(*args, **kw)
+
+
+def test_policy_ctor_defaults():
+ empty = frozenset()
+ policy = _make_policy()
+ assert policy.etag is None
+ assert policy.version is None
+ assert policy.bigtable_admins == empty
+ assert policy.bigtable_readers == empty
+ assert policy.bigtable_users == empty
+ assert policy.bigtable_viewers == empty
+ assert len(policy) == 0
+ assert dict(policy) == {}
+
+
+def test_policy_ctor_explicit():
+ VERSION = 1
+ ETAG = b"ETAG"
+ empty = frozenset()
+ policy = _make_policy(ETAG, VERSION)
+ assert policy.etag == ETAG
+ assert policy.version == VERSION
+ assert policy.bigtable_admins == empty
+ assert policy.bigtable_readers == empty
+ assert policy.bigtable_users == empty
+ assert policy.bigtable_viewers == empty
+ assert len(policy) == 0
+ assert dict(policy) == {}
+
+
+def test_policy_bigtable_admins():
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+
+ MEMBER = "user:phred@example.com"
+ expected = frozenset([MEMBER])
+ policy = _make_policy()
+ policy[BIGTABLE_ADMIN_ROLE] = [MEMBER]
+ assert policy.bigtable_admins == expected
+
+
+def test_policy_bigtable_readers():
+ from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE
+
+ MEMBER = "user:phred@example.com"
+ expected = frozenset([MEMBER])
+ policy = _make_policy()
+ policy[BIGTABLE_READER_ROLE] = [MEMBER]
+ assert policy.bigtable_readers == expected
+
+
+def test_policy_bigtable_users():
+ from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE
+
+ MEMBER = "user:phred@example.com"
+ expected = frozenset([MEMBER])
+ policy = _make_policy()
+ policy[BIGTABLE_USER_ROLE] = [MEMBER]
+ assert policy.bigtable_users == expected
+
+
+def test_policy_bigtable_viewers():
+ from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE
+
+ MEMBER = "user:phred@example.com"
+ expected = frozenset([MEMBER])
+ policy = _make_policy()
+ policy[BIGTABLE_VIEWER_ROLE] = [MEMBER]
+ assert policy.bigtable_viewers == expected
+
+
+def test_policy_from_pb_w_empty():
+ from google.iam.v1 import policy_pb2
+ from google.cloud.bigtable.policy import Policy
+
+ empty = frozenset()
+ message = policy_pb2.Policy()
+ policy = Policy.from_pb(message)
+ assert policy.etag == b""
+ assert policy.version == 0
+ assert policy.bigtable_admins == empty
+ assert policy.bigtable_readers == empty
+ assert policy.bigtable_users == empty
+ assert policy.bigtable_viewers == empty
+ assert len(policy) == 0
+ assert dict(policy) == {}
+
+
+def test_policy_from_pb_w_non_empty():
+ from google.iam.v1 import policy_pb2
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+ from google.cloud.bigtable.policy import Policy
+
+ ETAG = b"ETAG"
+ VERSION = 1
+ members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
+ empty = frozenset()
+ message = policy_pb2.Policy(
+ etag=ETAG,
+ version=VERSION,
+ bindings=[{"role": BIGTABLE_ADMIN_ROLE, "members": members}],
+ )
+ policy = Policy.from_pb(message)
+ assert policy.etag == ETAG
+ assert policy.version == VERSION
+ assert policy.bigtable_admins == set(members)
+ assert policy.bigtable_readers == empty
+ assert policy.bigtable_users == empty
+ assert policy.bigtable_viewers == empty
+ assert len(policy) == 1
+ assert dict(policy) == {BIGTABLE_ADMIN_ROLE: set(members)}
+
+
+def test_policy_from_pb_w_condition():
+ import pytest
+ from google.iam.v1 import policy_pb2
+ from google.api_core.iam import InvalidOperationException, _DICT_ACCESS_MSG
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+ from google.cloud.bigtable.policy import Policy
+
+ ETAG = b"ETAG"
+ VERSION = 3
+ members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
+ BINDINGS = [
+ {
+ "role": BIGTABLE_ADMIN_ROLE,
+ "members": members,
+ "condition": {
+ "title": "request_time",
+ "description": "Requests made before 2021-01-01T00:00:00Z",
+ "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")',
+ },
}
- policy = self._make_one(ETAG, VERSION)
- policy.bindings = [
- {
- "role": BIGTABLE_ADMIN_ROLE,
- "members": set(members),
- "condition": condition,
- }
- ]
- expected = policy_pb2.Policy(
- etag=ETAG,
- version=VERSION,
- bindings=[
- policy_pb2.Binding(
- role=BIGTABLE_ADMIN_ROLE,
- members=sorted(members),
- condition=condition,
- )
- ],
- )
-
- self.assertEqual(policy.to_pb(), expected)
-
- def test_from_api_repr_wo_etag(self):
- VERSION = 1
- empty = frozenset()
- resource = {"version": VERSION}
- klass = self._get_target_class()
- policy = klass.from_api_repr(resource)
- self.assertIsNone(policy.etag)
- self.assertEqual(policy.version, VERSION)
- self.assertEqual(policy.bigtable_admins, empty)
- self.assertEqual(policy.bigtable_readers, empty)
- self.assertEqual(policy.bigtable_users, empty)
- self.assertEqual(policy.bigtable_viewers, empty)
- self.assertEqual(len(policy), 0)
- self.assertEqual(dict(policy), {})
-
- def test_from_api_repr_w_etag(self):
- import base64
-
- ETAG = b"ETAG"
- empty = frozenset()
- resource = {"etag": base64.b64encode(ETAG).decode("ascii")}
- klass = self._get_target_class()
- policy = klass.from_api_repr(resource)
- self.assertEqual(policy.etag, ETAG)
- self.assertIsNone(policy.version)
- self.assertEqual(policy.bigtable_admins, empty)
- self.assertEqual(policy.bigtable_readers, empty)
- self.assertEqual(policy.bigtable_users, empty)
- self.assertEqual(policy.bigtable_viewers, empty)
- self.assertEqual(len(policy), 0)
- self.assertEqual(dict(policy), {})
-
- def test_to_api_repr_wo_etag(self):
- VERSION = 1
- resource = {"version": VERSION}
- policy = self._make_one(version=VERSION)
- self.assertEqual(policy.to_api_repr(), resource)
-
- def test_to_api_repr_w_etag(self):
- import base64
-
- ETAG = b"ETAG"
- policy = self._make_one(etag=ETAG)
- resource = {"etag": base64.b64encode(ETAG).decode("ascii")}
- self.assertEqual(policy.to_api_repr(), resource)
+ ]
+ message = policy_pb2.Policy(etag=ETAG, version=VERSION, bindings=BINDINGS,)
+ policy = Policy.from_pb(message)
+ assert policy.etag == ETAG
+ assert policy.version == VERSION
+ assert policy.bindings[0]["role"] == BIGTABLE_ADMIN_ROLE
+ assert policy.bindings[0]["members"] == set(members)
+ assert policy.bindings[0]["condition"] == BINDINGS[0]["condition"]
+ with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
+ policy.bigtable_admins
+ with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
+ policy.bigtable_readers
+ with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
+ policy.bigtable_users
+ with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
+ policy.bigtable_viewers
+ with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG):
+ len(policy)
+
+
+def test_policy_to_pb_empty():
+ from google.iam.v1 import policy_pb2
+
+ policy = _make_policy()
+ expected = policy_pb2.Policy()
+
+ assert policy.to_pb() == expected
+
+
+def test_policy_to_pb_explicit():
+ from google.iam.v1 import policy_pb2
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+
+ VERSION = 1
+ ETAG = b"ETAG"
+ members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
+ policy = _make_policy(ETAG, VERSION)
+ policy[BIGTABLE_ADMIN_ROLE] = members
+ expected = policy_pb2.Policy(
+ etag=ETAG,
+ version=VERSION,
+ bindings=[
+ policy_pb2.Binding(role=BIGTABLE_ADMIN_ROLE, members=sorted(members))
+ ],
+ )
+
+ assert policy.to_pb() == expected
+
+
+def test_policy_to_pb_w_condition():
+ from google.iam.v1 import policy_pb2
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+
+ VERSION = 3
+ ETAG = b"ETAG"
+ members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
+ condition = {
+ "title": "request_time",
+ "description": "Requests made before 2021-01-01T00:00:00Z",
+ "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")',
+ }
+ policy = _make_policy(ETAG, VERSION)
+ policy.bindings = [
+ {"role": BIGTABLE_ADMIN_ROLE, "members": set(members), "condition": condition}
+ ]
+ expected = policy_pb2.Policy(
+ etag=ETAG,
+ version=VERSION,
+ bindings=[
+ policy_pb2.Binding(
+ role=BIGTABLE_ADMIN_ROLE, members=sorted(members), condition=condition,
+ )
+ ],
+ )
+
+ assert policy.to_pb() == expected
+
+
+def test_policy_from_api_repr_wo_etag():
+ from google.cloud.bigtable.policy import Policy
+
+ VERSION = 1
+ empty = frozenset()
+ resource = {"version": VERSION}
+ policy = Policy.from_api_repr(resource)
+ assert policy.etag is None
+ assert policy.version == VERSION
+ assert policy.bigtable_admins == empty
+ assert policy.bigtable_readers == empty
+ assert policy.bigtable_users == empty
+ assert policy.bigtable_viewers == empty
+ assert len(policy) == 0
+ assert dict(policy) == {}
+
+
+def test_policy_from_api_repr_w_etag():
+ import base64
+ from google.cloud.bigtable.policy import Policy
+
+ ETAG = b"ETAG"
+ empty = frozenset()
+ resource = {"etag": base64.b64encode(ETAG).decode("ascii")}
+ policy = Policy.from_api_repr(resource)
+ assert policy.etag == ETAG
+ assert policy.version is None
+ assert policy.bigtable_admins == empty
+ assert policy.bigtable_readers == empty
+ assert policy.bigtable_users == empty
+ assert policy.bigtable_viewers == empty
+ assert len(policy) == 0
+ assert dict(policy) == {}
+
+
+def test_policy_to_api_repr_wo_etag():
+ VERSION = 1
+ resource = {"version": VERSION}
+ policy = _make_policy(version=VERSION)
+ assert policy.to_api_repr() == resource
+
+
+def test_policy_to_api_repr_w_etag():
+ import base64
+
+ ETAG = b"ETAG"
+ policy = _make_policy(etag=ETAG)
+ resource = {"etag": base64.b64encode(ETAG).decode("ascii")}
+ assert policy.to_api_repr() == resource
diff --git a/tests/unit/test_row.py b/tests/unit/test_row.py
index 1f33f214b..774756314 100644
--- a/tests/unit/test_row.py
+++ b/tests/unit/test_row.py
@@ -13,763 +13,726 @@
# limitations under the License.
-import unittest
-
import mock
+import pytest
from ._testing import _make_credentials
-class TestRow(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row import Row
+def _make_client(*args, **kwargs):
+ from google.cloud.bigtable.client import Client
- return Row
+ return Client(*args, **kwargs)
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
- def test_row_key_getter(self):
- row = self._make_one(row_key=b"row_key", table="table")
- self.assertEqual(b"row_key", row.row_key)
+def _make_row(*args, **kwargs):
+ from google.cloud.bigtable.row import Row
- def test_row_table_getter(self):
- row = self._make_one(row_key=b"row_key", table="table")
- self.assertEqual("table", row.table)
+ return Row(*args, **kwargs)
-class Test_SetDeleteRow(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row import _SetDeleteRow
+def test_row_key_getter():
+ row = _make_row(row_key=b"row_key", table="table")
+ assert b"row_key" == row.row_key
- return _SetDeleteRow
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+def test_row_table_getter():
+ row = _make_row(row_key=b"row_key", table="table")
+ assert "table" == row.table
- def test__get_mutations_virtual(self):
- row = self._make_one(b"row-key", None)
- with self.assertRaises(NotImplementedError):
- row._get_mutations(None)
+def _make__set_delete_row(*args, **kwargs):
+ from google.cloud.bigtable.row import _SetDeleteRow
-class TestDirectRow(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row import DirectRow
+ return _SetDeleteRow(*args, **kwargs)
- return DirectRow
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+def test__set_delete_row__get_mutations_virtual():
+ row = _make__set_delete_row(b"row-key", None)
+ with pytest.raises(NotImplementedError):
+ row._get_mutations(None)
- @staticmethod
- def _get_target_client_class():
- from google.cloud.bigtable.client import Client
- return Client
+def _make_direct_row(*args, **kwargs):
+ from google.cloud.bigtable.row import DirectRow
- def _make_client(self, *args, **kwargs):
- return self._get_target_client_class()(*args, **kwargs)
+ return DirectRow(*args, **kwargs)
- def test_constructor(self):
- row_key = b"row_key"
- table = object()
- row = self._make_one(row_key, table)
- self.assertEqual(row._row_key, row_key)
- self.assertIs(row._table, table)
- self.assertEqual(row._pb_mutations, [])
+def test_direct_row_constructor():
+ row_key = b"row_key"
+ table = object()
- def test_constructor_with_unicode(self):
- row_key = u"row_key"
- row_key_bytes = b"row_key"
- table = object()
+ row = _make_direct_row(row_key, table)
+ assert row._row_key == row_key
+ assert row._table is table
+ assert row._pb_mutations == []
- row = self._make_one(row_key, table)
- self.assertEqual(row._row_key, row_key_bytes)
- self.assertIs(row._table, table)
- def test_constructor_with_non_bytes(self):
- row_key = object()
- with self.assertRaises(TypeError):
- self._make_one(row_key, None)
+def test_direct_row_constructor_with_unicode():
+ row_key = u"row_key"
+ row_key_bytes = b"row_key"
+ table = object()
- def test__get_mutations(self):
- row_key = b"row_key"
- row = self._make_one(row_key, None)
+ row = _make_direct_row(row_key, table)
+ assert row._row_key == row_key_bytes
+ assert row._table is table
- row._pb_mutations = mutations = object()
- self.assertIs(mutations, row._get_mutations(None))
- def test_get_mutations_size(self):
- row_key = b"row_key"
- row = self._make_one(row_key, None)
+def test_direct_row_constructor_with_non_bytes():
+ row_key = object()
+ with pytest.raises(TypeError):
+ _make_direct_row(row_key, None)
- column_family_id1 = u"column_family_id1"
- column_family_id2 = u"column_family_id2"
- column1 = b"column1"
- column2 = b"column2"
- number_of_bytes = 1 * 1024 * 1024
- value = b"1" * number_of_bytes
- row.set_cell(column_family_id1, column1, value)
- row.set_cell(column_family_id2, column2, value)
+def test_direct_row__get_mutations():
+ row_key = b"row_key"
+ row = _make_direct_row(row_key, None)
- total_mutations_size = 0
- for mutation in row._get_mutations():
- total_mutations_size += mutation._pb.ByteSize()
-
- self.assertEqual(row.get_mutations_size(), total_mutations_size)
-
- def _set_cell_helper(
- self,
- column=None,
- column_bytes=None,
- value=b"foobar",
- timestamp=None,
- timestamp_micros=-1,
- ):
- import struct
-
- row_key = b"row_key"
- column_family_id = u"column_family_id"
- if column is None:
- column = b"column"
- table = object()
- row = self._make_one(row_key, table)
- self.assertEqual(row._pb_mutations, [])
- row.set_cell(column_family_id, column, value, timestamp=timestamp)
-
- if isinstance(value, int):
- value = struct.pack(">q", value)
- expected_pb = _MutationPB(
- set_cell=_MutationSetCellPB(
- family_name=column_family_id,
- column_qualifier=column_bytes or column,
- timestamp_micros=timestamp_micros,
- value=value,
- )
- )
- self.assertEqual(row._pb_mutations, [expected_pb])
+ row._pb_mutations = mutations = object()
+ assert mutations is row._get_mutations(None)
- def test_set_cell(self):
- self._set_cell_helper()
- def test_set_cell_with_string_column(self):
- column_bytes = b"column"
- column_non_bytes = u"column"
- self._set_cell_helper(column=column_non_bytes, column_bytes=column_bytes)
+def test_direct_row_get_mutations_size():
+ row_key = b"row_key"
+ row = _make_direct_row(row_key, None)
- def test_set_cell_with_integer_value(self):
- value = 1337
- self._set_cell_helper(value=value)
+ column_family_id1 = u"column_family_id1"
+ column_family_id2 = u"column_family_id2"
+ column1 = b"column1"
+ column2 = b"column2"
+ number_of_bytes = 1 * 1024 * 1024
+ value = b"1" * number_of_bytes
- def test_set_cell_with_non_bytes_value(self):
- row_key = b"row_key"
- column = b"column"
- column_family_id = u"column_family_id"
- table = object()
-
- row = self._make_one(row_key, table)
- value = object() # Not bytes
- with self.assertRaises(TypeError):
- row.set_cell(column_family_id, column, value)
-
- def test_set_cell_with_non_null_timestamp(self):
- import datetime
- from google.cloud._helpers import _EPOCH
-
- microseconds = 898294371
- millis_granularity = microseconds - (microseconds % 1000)
- timestamp = _EPOCH + datetime.timedelta(microseconds=microseconds)
- self._set_cell_helper(timestamp=timestamp, timestamp_micros=millis_granularity)
-
- def test_delete(self):
- row_key = b"row_key"
- row = self._make_one(row_key, object())
- self.assertEqual(row._pb_mutations, [])
- row.delete()
-
- expected_pb = _MutationPB(delete_from_row=_MutationDeleteFromRowPB())
- self.assertEqual(row._pb_mutations, [expected_pb])
-
- def test_delete_cell(self):
- klass = self._get_target_class()
-
- class MockRow(klass):
- def __init__(self, *args, **kwargs):
- super(MockRow, self).__init__(*args, **kwargs)
- self._args = []
- self._kwargs = []
-
- # Replace the called method with one that logs arguments.
- def _delete_cells(self, *args, **kwargs):
- self._args.append(args)
- self._kwargs.append(kwargs)
-
- row_key = b"row_key"
- column = b"column"
- column_family_id = u"column_family_id"
- table = object()
-
- mock_row = MockRow(row_key, table)
- # Make sure no values are set before calling the method.
- self.assertEqual(mock_row._pb_mutations, [])
- self.assertEqual(mock_row._args, [])
- self.assertEqual(mock_row._kwargs, [])
-
- # Actually make the request against the mock class.
- time_range = object()
- mock_row.delete_cell(column_family_id, column, time_range=time_range)
- self.assertEqual(mock_row._pb_mutations, [])
- self.assertEqual(mock_row._args, [(column_family_id, [column])])
- self.assertEqual(mock_row._kwargs, [{"state": None, "time_range": time_range}])
-
- def test_delete_cells_non_iterable(self):
- row_key = b"row_key"
- column_family_id = u"column_family_id"
- table = object()
-
- row = self._make_one(row_key, table)
- columns = object() # Not iterable
- with self.assertRaises(TypeError):
- row.delete_cells(column_family_id, columns)
-
- def test_delete_cells_all_columns(self):
- row_key = b"row_key"
- column_family_id = u"column_family_id"
- table = object()
-
- row = self._make_one(row_key, table)
- klass = self._get_target_class()
- self.assertEqual(row._pb_mutations, [])
- row.delete_cells(column_family_id, klass.ALL_COLUMNS)
-
- expected_pb = _MutationPB(
- delete_from_family=_MutationDeleteFromFamilyPB(family_name=column_family_id)
- )
- self.assertEqual(row._pb_mutations, [expected_pb])
+ row.set_cell(column_family_id1, column1, value)
+ row.set_cell(column_family_id2, column2, value)
- def test_delete_cells_no_columns(self):
- row_key = b"row_key"
- column_family_id = u"column_family_id"
- table = object()
+ total_mutations_size = 0
+ for mutation in row._get_mutations():
+ total_mutations_size += mutation._pb.ByteSize()
+
+ assert row.get_mutations_size() == total_mutations_size
- row = self._make_one(row_key, table)
- columns = []
- self.assertEqual(row._pb_mutations, [])
- row.delete_cells(column_family_id, columns)
- self.assertEqual(row._pb_mutations, [])
- def _delete_cells_helper(self, time_range=None):
- row_key = b"row_key"
+def _set_cell_helper(
+ column=None,
+ column_bytes=None,
+ value=b"foobar",
+ timestamp=None,
+ timestamp_micros=-1,
+):
+ import struct
+
+ row_key = b"row_key"
+ column_family_id = u"column_family_id"
+ if column is None:
column = b"column"
- column_family_id = u"column_family_id"
- table = object()
-
- row = self._make_one(row_key, table)
- columns = [column]
- self.assertEqual(row._pb_mutations, [])
- row.delete_cells(column_family_id, columns, time_range=time_range)
-
- expected_pb = _MutationPB(
- delete_from_column=_MutationDeleteFromColumnPB(
- family_name=column_family_id, column_qualifier=column
- )
+ table = object()
+ row = _make_direct_row(row_key, table)
+ assert row._pb_mutations == []
+ row.set_cell(column_family_id, column, value, timestamp=timestamp)
+
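+ # Mirror set_cell's packing of integer values as big-endian signed 64-bit.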
+ if isinstance(value, int):
+ value = struct.pack(">q", value)
+ expected_pb = _MutationPB(
+ set_cell=_MutationSetCellPB(
+ family_name=column_family_id,
+ column_qualifier=column_bytes or column,
+ timestamp_micros=timestamp_micros,
+ value=value,
)
- if time_range is not None:
- expected_pb.delete_from_column.time_range._pb.CopyFrom(
- time_range.to_pb()._pb
- )
- self.assertEqual(row._pb_mutations, [expected_pb])
-
- def test_delete_cells_no_time_range(self):
- self._delete_cells_helper()
-
- def test_delete_cells_with_time_range(self):
- import datetime
- from google.cloud._helpers import _EPOCH
- from google.cloud.bigtable.row_filters import TimestampRange
-
- microseconds = 30871000 # Makes sure already milliseconds granularity
- start = _EPOCH + datetime.timedelta(microseconds=microseconds)
- time_range = TimestampRange(start=start)
- self._delete_cells_helper(time_range=time_range)
-
- def test_delete_cells_with_bad_column(self):
- # This makes sure a failure on one of the columns doesn't leave
- # the row's mutations in a bad state.
- row_key = b"row_key"
- column = b"column"
- column_family_id = u"column_family_id"
- table = object()
-
- row = self._make_one(row_key, table)
- columns = [column, object()]
- self.assertEqual(row._pb_mutations, [])
- with self.assertRaises(TypeError):
- row.delete_cells(column_family_id, columns)
- self.assertEqual(row._pb_mutations, [])
-
- def test_delete_cells_with_string_columns(self):
- row_key = b"row_key"
- column_family_id = u"column_family_id"
- column1 = u"column1"
- column1_bytes = b"column1"
- column2 = u"column2"
- column2_bytes = b"column2"
- table = object()
-
- row = self._make_one(row_key, table)
- columns = [column1, column2]
- self.assertEqual(row._pb_mutations, [])
- row.delete_cells(column_family_id, columns)
+ )
+ assert row._pb_mutations == [expected_pb]
- expected_pb1 = _MutationPB(
- delete_from_column=_MutationDeleteFromColumnPB(
- family_name=column_family_id, column_qualifier=column1_bytes
- )
- )
- expected_pb2 = _MutationPB(
- delete_from_column=_MutationDeleteFromColumnPB(
- family_name=column_family_id, column_qualifier=column2_bytes
- )
- )
- self.assertEqual(row._pb_mutations, [expected_pb1, expected_pb2])
- def test_commit(self):
- project_id = "project-id"
- row_key = b"row_key"
- table_name = "projects/more-stuff"
- column_family_id = u"column_family_id"
- column = b"column"
+def test_direct_row_set_cell():
+ _set_cell_helper()
- credentials = _make_credentials()
- client = self._make_client(
- project=project_id, credentials=credentials, admin=True
- )
- table = _Table(table_name, client=client)
- row = self._make_one(row_key, table)
- value = b"bytes-value"
- # Perform the method and check the result.
- row.set_cell(column_family_id, column, value)
- row.commit()
- self.assertEqual(table.mutated_rows, [row])
+def test_direct_row_set_cell_with_string_column():
+ column_bytes = b"column"
+ column_non_bytes = u"column"
+ _set_cell_helper(column=column_non_bytes, column_bytes=column_bytes)
- def test_commit_with_exception(self):
- from google.rpc import status_pb2
- project_id = "project-id"
- row_key = b"row_key"
- table_name = "projects/more-stuff"
- column_family_id = u"column_family_id"
- column = b"column"
+def test_direct_row_set_cell_with_integer_value():
+ value = 1337
+ _set_cell_helper(value=value)
- credentials = _make_credentials()
- client = self._make_client(
- project=project_id, credentials=credentials, admin=True
- )
- table = _Table(table_name, client=client)
- row = self._make_one(row_key, table)
- value = b"bytes-value"
- # Perform the method and check the result.
+def test_direct_row_set_cell_with_non_bytes_value():
+ row_key = b"row_key"
+ column = b"column"
+ column_family_id = u"column_family_id"
+ table = object()
+
+ row = _make_direct_row(row_key, table)
+ value = object() # Not bytes
+ with pytest.raises(TypeError):
row.set_cell(column_family_id, column, value)
- result = row.commit()
- expected = status_pb2.Status(code=0)
- self.assertEqual(result, expected)
-
-
-class TestConditionalRow(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row import ConditionalRow
-
- return ConditionalRow
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- @staticmethod
- def _get_target_client_class():
- from google.cloud.bigtable.client import Client
-
- return Client
-
- def _make_client(self, *args, **kwargs):
- return self._get_target_client_class()(*args, **kwargs)
-
- def test_constructor(self):
- row_key = b"row_key"
- table = object()
- filter_ = object()
-
- row = self._make_one(row_key, table, filter_=filter_)
- self.assertEqual(row._row_key, row_key)
- self.assertIs(row._table, table)
- self.assertIs(row._filter, filter_)
- self.assertEqual(row._true_pb_mutations, [])
- self.assertEqual(row._false_pb_mutations, [])
-
- def test__get_mutations(self):
- row_key = b"row_key"
- filter_ = object()
- row = self._make_one(row_key, None, filter_=filter_)
-
- row._true_pb_mutations = true_mutations = object()
- row._false_pb_mutations = false_mutations = object()
- self.assertIs(true_mutations, row._get_mutations(True))
- self.assertIs(false_mutations, row._get_mutations(False))
- self.assertIs(false_mutations, row._get_mutations(None))
-
- def test_commit(self):
- from google.cloud.bigtable.row_filters import RowSampleFilter
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
-
- project_id = "project-id"
- row_key = b"row_key"
- table_name = "projects/more-stuff"
- app_profile_id = "app_profile_id"
- column_family_id1 = u"column_family_id1"
- column_family_id2 = u"column_family_id2"
- column_family_id3 = u"column_family_id3"
- column1 = b"column1"
- column2 = b"column2"
-
- api = mock.create_autospec(BigtableClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=project_id, credentials=credentials, admin=True
- )
- table = _Table(table_name, client=client, app_profile_id=app_profile_id)
- row_filter = RowSampleFilter(0.33)
- row = self._make_one(row_key, table, filter_=row_filter)
- # Create request_pb
- value1 = b"bytes-value"
- # Create response_pb
- predicate_matched = True
- response_pb = _CheckAndMutateRowResponsePB(predicate_matched=predicate_matched)
+def test_direct_row_set_cell_with_non_null_timestamp():
+ import datetime
+ from google.cloud._helpers import _EPOCH
- # Patch the stub used by the API method.
- api.check_and_mutate_row.side_effect = [response_pb]
- client._table_data_client = api
+ microseconds = 898294371
+ millis_granularity = microseconds - (microseconds % 1000)
+ timestamp = _EPOCH + datetime.timedelta(microseconds=microseconds)
+ _set_cell_helper(timestamp=timestamp, timestamp_micros=millis_granularity)
- # Create expected_result.
- expected_result = predicate_matched
- # Perform the method and check the result.
- row.set_cell(column_family_id1, column1, value1, state=True)
- row.delete(state=False)
- row.delete_cell(column_family_id2, column2, state=True)
- row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True)
- result = row.commit()
- call_args = api.check_and_mutate_row.call_args
- self.assertEqual(app_profile_id, call_args.app_profile_id[0])
- self.assertEqual(result, expected_result)
- self.assertEqual(row._true_pb_mutations, [])
- self.assertEqual(row._false_pb_mutations, [])
-
- def test_commit_too_many_mutations(self):
- from google.cloud._testing import _Monkey
- from google.cloud.bigtable import row as MUT
-
- row_key = b"row_key"
- table = object()
- filter_ = object()
- row = self._make_one(row_key, table, filter_=filter_)
- row._true_pb_mutations = [1, 2, 3]
- num_mutations = len(row._true_pb_mutations)
- with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1):
- with self.assertRaises(ValueError):
- row.commit()
-
- def test_commit_no_mutations(self):
- from tests.unit._testing import _FakeStub
-
- project_id = "project-id"
- row_key = b"row_key"
-
- credentials = _make_credentials()
- client = self._make_client(
- project=project_id, credentials=credentials, admin=True
- )
- table = _Table(None, client=client)
- filter_ = object()
- row = self._make_one(row_key, table, filter_=filter_)
- self.assertEqual(row._true_pb_mutations, [])
- self.assertEqual(row._false_pb_mutations, [])
+def test_direct_row_delete():
+ row_key = b"row_key"
+ row = _make_direct_row(row_key, object())
+ assert row._pb_mutations == []
+ row.delete()
- # Patch the stub used by the API method.
- stub = _FakeStub()
+ expected_pb = _MutationPB(delete_from_row=_MutationDeleteFromRowPB())
+ assert row._pb_mutations == [expected_pb]
- # Perform the method and check the result.
- result = row.commit()
- self.assertIsNone(result)
- # Make sure no request was sent.
- self.assertEqual(stub.method_calls, [])
+def test_direct_row_delete_cell():
+ from google.cloud.bigtable.row import DirectRow
-class TestAppendRow(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row import AppendRow
+ class MockRow(DirectRow):
+ def __init__(self, *args, **kwargs):
+ super(MockRow, self).__init__(*args, **kwargs)
+ self._args = []
+ self._kwargs = []
- return AppendRow
+ # Replace the called method with one that logs arguments.
+ def _delete_cells(self, *args, **kwargs):
+ self._args.append(args)
+ self._kwargs.append(kwargs)
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+ row_key = b"row_key"
+ column = b"column"
+ column_family_id = u"column_family_id"
+ table = object()
- @staticmethod
- def _get_target_client_class():
- from google.cloud.bigtable.client import Client
+ mock_row = MockRow(row_key, table)
+ # Make sure no values are set before calling the method.
+ assert mock_row._pb_mutations == []
+ assert mock_row._args == []
+ assert mock_row._kwargs == []
- return Client
+ # Actually make the request against the mock class.
+ time_range = object()
+ mock_row.delete_cell(column_family_id, column, time_range=time_range)
+ assert mock_row._pb_mutations == []
+ assert mock_row._args == [(column_family_id, [column])]
+ assert mock_row._kwargs == [{"state": None, "time_range": time_range}]
- def _make_client(self, *args, **kwargs):
- return self._get_target_client_class()(*args, **kwargs)
- def test_constructor(self):
- row_key = b"row_key"
- table = object()
+def test_direct_row_delete_cells_non_iterable():
+ row_key = b"row_key"
+ column_family_id = u"column_family_id"
+ table = object()
- row = self._make_one(row_key, table)
- self.assertEqual(row._row_key, row_key)
- self.assertIs(row._table, table)
- self.assertEqual(row._rule_pb_list, [])
+ row = _make_direct_row(row_key, table)
+ columns = object() # Not iterable
+ with pytest.raises(TypeError):
+ row.delete_cells(column_family_id, columns)
- def test_clear(self):
- row_key = b"row_key"
- table = object()
- row = self._make_one(row_key, table)
- row._rule_pb_list = [1, 2, 3]
- row.clear()
- self.assertEqual(row._rule_pb_list, [])
- def test_append_cell_value(self):
- table = object()
- row_key = b"row_key"
- row = self._make_one(row_key, table)
- self.assertEqual(row._rule_pb_list, [])
+def test_direct_row_delete_cells_all_columns():
+ from google.cloud.bigtable.row import DirectRow
- column = b"column"
- column_family_id = u"column_family_id"
- value = b"bytes-val"
- row.append_cell_value(column_family_id, column, value)
- expected_pb = _ReadModifyWriteRulePB(
- family_name=column_family_id, column_qualifier=column, append_value=value
- )
- self.assertEqual(row._rule_pb_list, [expected_pb])
+ row_key = b"row_key"
+ column_family_id = u"column_family_id"
+ table = object()
- def test_increment_cell_value(self):
- table = object()
- row_key = b"row_key"
- row = self._make_one(row_key, table)
- self.assertEqual(row._rule_pb_list, [])
+ row = _make_direct_row(row_key, table)
+ assert row._pb_mutations == []
+ row.delete_cells(column_family_id, DirectRow.ALL_COLUMNS)
- column = b"column"
- column_family_id = u"column_family_id"
- int_value = 281330
- row.increment_cell_value(column_family_id, column, int_value)
- expected_pb = _ReadModifyWriteRulePB(
- family_name=column_family_id,
- column_qualifier=column,
- increment_amount=int_value,
- )
- self.assertEqual(row._rule_pb_list, [expected_pb])
-
- def test_commit(self):
- from google.cloud._testing import _Monkey
- from google.cloud.bigtable import row as MUT
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
-
- project_id = "project-id"
- row_key = b"row_key"
- table_name = "projects/more-stuff"
- app_profile_id = "app_profile_id"
- column_family_id = u"column_family_id"
- column = b"column"
+ expected_pb = _MutationPB(
+ delete_from_family=_MutationDeleteFromFamilyPB(family_name=column_family_id)
+ )
+ assert row._pb_mutations == [expected_pb]
- api = mock.create_autospec(BigtableClient)
- credentials = _make_credentials()
- client = self._make_client(
- project=project_id, credentials=credentials, admin=True
- )
- table = _Table(table_name, client=client, app_profile_id=app_profile_id)
- row = self._make_one(row_key, table)
-
- # Create request_pb
- value = b"bytes-value"
-
- # Create expected_result.
- row_responses = []
- expected_result = object()
-
- # Patch API calls
- client._table_data_client = api
-
- def mock_parse_rmw_row_response(row_response):
- row_responses.append(row_response)
- return expected_result
-
- # Perform the method and check the result.
- with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response):
- row._table._instance._client._table_data_client = api
- row.append_cell_value(column_family_id, column, value)
- result = row.commit()
- call_args = api.read_modify_write_row.call_args_list[0]
- self.assertEqual(app_profile_id, call_args.app_profile_id[0])
- self.assertEqual(result, expected_result)
- self.assertEqual(row._rule_pb_list, [])
-
- def test_commit_no_rules(self):
- from tests.unit._testing import _FakeStub
-
- project_id = "project-id"
- row_key = b"row_key"
-
- credentials = _make_credentials()
- client = self._make_client(
- project=project_id, credentials=credentials, admin=True
- )
- table = _Table(None, client=client)
- row = self._make_one(row_key, table)
- self.assertEqual(row._rule_pb_list, [])
+def test_direct_row_delete_cells_no_columns():
+ row_key = b"row_key"
+ column_family_id = u"column_family_id"
+ table = object()
- # Patch the stub used by the API method.
- stub = _FakeStub()
+ row = _make_direct_row(row_key, table)
+ columns = []
+ assert row._pb_mutations == []
+ row.delete_cells(column_family_id, columns)
+ assert row._pb_mutations == []
- # Perform the method and check the result.
- result = row.commit()
- self.assertEqual(result, {})
- # Make sure no request was sent.
- self.assertEqual(stub.method_calls, [])
-
- def test_commit_too_many_mutations(self):
- from google.cloud._testing import _Monkey
- from google.cloud.bigtable import row as MUT
-
- row_key = b"row_key"
- table = object()
- row = self._make_one(row_key, table)
- row._rule_pb_list = [1, 2, 3]
- num_mutations = len(row._rule_pb_list)
- with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1):
- with self.assertRaises(ValueError):
- row.commit()
-
-
-class Test__parse_rmw_row_response(unittest.TestCase):
- def _call_fut(self, row_response):
- from google.cloud.bigtable.row import _parse_rmw_row_response
-
- return _parse_rmw_row_response(row_response)
-
- def test_it(self):
- from google.cloud._helpers import _datetime_from_microseconds
-
- col_fam1 = u"col-fam-id"
- col_fam2 = u"col-fam-id2"
- col_name1 = b"col-name1"
- col_name2 = b"col-name2"
- col_name3 = b"col-name3-but-other-fam"
- cell_val1 = b"cell-val"
- cell_val2 = b"cell-val-newer"
- cell_val3 = b"altcol-cell-val"
- cell_val4 = b"foo"
-
- microseconds = 1000871
- timestamp = _datetime_from_microseconds(microseconds)
- expected_output = {
- col_fam1: {
- col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)],
- col_name2: [(cell_val3, timestamp)],
- },
- col_fam2: {col_name3: [(cell_val4, timestamp)]},
- }
- response_row = _RowPB(
- families=[
- _FamilyPB(
- name=col_fam1,
- columns=[
- _ColumnPB(
- qualifier=col_name1,
- cells=[
- _CellPB(value=cell_val1, timestamp_micros=microseconds),
- _CellPB(value=cell_val2, timestamp_micros=microseconds),
- ],
- ),
- _ColumnPB(
- qualifier=col_name2,
- cells=[
- _CellPB(value=cell_val3, timestamp_micros=microseconds)
- ],
- ),
- ],
- ),
- _FamilyPB(
- name=col_fam2,
- columns=[
- _ColumnPB(
- qualifier=col_name3,
- cells=[
- _CellPB(value=cell_val4, timestamp_micros=microseconds)
- ],
- )
- ],
- ),
- ]
+
+def _delete_cells_helper(time_range=None):
+ row_key = b"row_key"
+ column = b"column"
+ column_family_id = u"column_family_id"
+ table = object()
+
+ row = _make_direct_row(row_key, table)
+ columns = [column]
+ assert row._pb_mutations == []
+ row.delete_cells(column_family_id, columns, time_range=time_range)
+
+ expected_pb = _MutationPB(
+ delete_from_column=_MutationDeleteFromColumnPB(
+ family_name=column_family_id, column_qualifier=column
)
- sample_input = _ReadModifyWriteRowResponsePB(row=response_row)
- self.assertEqual(expected_output, self._call_fut(sample_input))
+ )
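+ # A time range, when given, is copied into the expected column mutation below.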
+ if time_range is not None:
+ expected_pb.delete_from_column.time_range._pb.CopyFrom(time_range.to_pb()._pb)
+ assert row._pb_mutations == [expected_pb]
+
+
+def test_direct_row_delete_cells_no_time_range():
+ _delete_cells_helper()
+
+
+def test_direct_row_delete_cells_with_time_range():
+ import datetime
+ from google.cloud._helpers import _EPOCH
+ from google.cloud.bigtable.row_filters import TimestampRange
+ microseconds = 30871000 # ensures the value already has millisecond granularity
+ start = _EPOCH + datetime.timedelta(microseconds=microseconds)
+ time_range = TimestampRange(start=start)
+ _delete_cells_helper(time_range=time_range)
-class Test__parse_family_pb(unittest.TestCase):
- def _call_fut(self, family_pb):
- from google.cloud.bigtable.row import _parse_family_pb
- return _parse_family_pb(family_pb)
+def test_direct_row_delete_cells_with_bad_column():
+ # This makes sure a failure on one of the columns doesn't leave
+ # the row's mutations in a bad state.
+ row_key = b"row_key"
+ column = b"column"
+ column_family_id = u"column_family_id"
+ table = object()
- def test_it(self):
- from google.cloud._helpers import _datetime_from_microseconds
+ row = _make_direct_row(row_key, table)
+ columns = [column, object()]
+ assert row._pb_mutations == []
+ with pytest.raises(TypeError):
+ row.delete_cells(column_family_id, columns)
+ assert row._pb_mutations == []
+
+
+def test_direct_row_delete_cells_with_string_columns():
+ row_key = b"row_key"
+ column_family_id = u"column_family_id"
+ column1 = u"column1"
+ column1_bytes = b"column1"
+ column2 = u"column2"
+ column2_bytes = b"column2"
+ table = object()
+
+ row = _make_direct_row(row_key, table)
+ columns = [column1, column2]
+ assert row._pb_mutations == []
+ row.delete_cells(column_family_id, columns)
+
+ expected_pb1 = _MutationPB(
+ delete_from_column=_MutationDeleteFromColumnPB(
+ family_name=column_family_id, column_qualifier=column1_bytes
+ )
+ )
+ expected_pb2 = _MutationPB(
+ delete_from_column=_MutationDeleteFromColumnPB(
+ family_name=column_family_id, column_qualifier=column2_bytes
+ )
+ )
+ assert row._pb_mutations == [expected_pb1, expected_pb2]
+
+
+def test_direct_row_commit():
+ project_id = "project-id"
+ row_key = b"row_key"
+ table_name = "projects/more-stuff"
+ column_family_id = u"column_family_id"
+ column = b"column"
- col_fam1 = u"col-fam-id"
- col_name1 = b"col-name1"
- col_name2 = b"col-name2"
- cell_val1 = b"cell-val"
- cell_val2 = b"cell-val-newer"
- cell_val3 = b"altcol-cell-val"
+ credentials = _make_credentials()
+ client = _make_client(project=project_id, credentials=credentials, admin=True)
+ table = _Table(table_name, client=client)
+ row = _make_direct_row(row_key, table)
+ value = b"bytes-value"
+
+ # Perform the method and check the result.
+ row.set_cell(column_family_id, column, value)
+ row.commit()
+ assert table.mutated_rows == [row]
+
+
+def test_direct_row_commit_with_exception():
+ from google.rpc import status_pb2
+
+ project_id = "project-id"
+ row_key = b"row_key"
+ table_name = "projects/more-stuff"
+ column_family_id = u"column_family_id"
+ column = b"column"
+
+ credentials = _make_credentials()
+ client = _make_client(project=project_id, credentials=credentials, admin=True)
+ table = _Table(table_name, client=client)
+ row = _make_direct_row(row_key, table)
+ value = b"bytes-value"
+
+ # Perform the method and check the result.
+ row.set_cell(column_family_id, column, value)
+ result = row.commit()
+ expected = status_pb2.Status(code=0)
+ assert result == expected
+
+
+def _make_conditional_row(*args, **kwargs):
+ from google.cloud.bigtable.row import ConditionalRow
+
+ return ConditionalRow(*args, **kwargs)
+
+
+def test_conditional_row_constructor():
+ row_key = b"row_key"
+ table = object()
+ filter_ = object()
+
+ row = _make_conditional_row(row_key, table, filter_=filter_)
+ assert row._row_key == row_key
+ assert row._table is table
+ assert row._filter is filter_
+ assert row._true_pb_mutations == []
+ assert row._false_pb_mutations == []
+
+
+def test_conditional_row__get_mutations():
+ row_key = b"row_key"
+ filter_ = object()
+ row = _make_conditional_row(row_key, None, filter_=filter_)
+
+ row._true_pb_mutations = true_mutations = object()
+ row._false_pb_mutations = false_mutations = object()
+ assert true_mutations is row._get_mutations(True)
+ assert false_mutations is row._get_mutations(False)
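+ # state=None falls through to the false-branch mutations.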
+ assert false_mutations is row._get_mutations(None)
+
+
+def test_conditional_row_commit():
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+
+ project_id = "project-id"
+ row_key = b"row_key"
+ table_name = "projects/more-stuff"
+ app_profile_id = "app_profile_id"
+ column_family_id1 = u"column_family_id1"
+ column_family_id2 = u"column_family_id2"
+ column_family_id3 = u"column_family_id3"
+ column1 = b"column1"
+ column2 = b"column2"
+
+ api = mock.create_autospec(BigtableClient)
+ credentials = _make_credentials()
+ client = _make_client(project=project_id, credentials=credentials, admin=True)
+ table = _Table(table_name, client=client, app_profile_id=app_profile_id)
+ row_filter = RowSampleFilter(0.33)
+ row = _make_conditional_row(row_key, table, filter_=row_filter)
+
+ # Create request_pb
+ value1 = b"bytes-value"
+
+ # Create response_pb
+ predicate_matched = True
+ response_pb = _CheckAndMutateRowResponsePB(predicate_matched=predicate_matched)
+
+ # Patch the stub used by the API method.
+ api.check_and_mutate_row.side_effect = [response_pb]
+ client._table_data_client = api
+
+ # Create expected_result.
+ expected_result = predicate_matched
+
+ # Perform the method and check the result.
+ row.set_cell(column_family_id1, column1, value1, state=True)
+ row.delete(state=False)
+ row.delete_cell(column_family_id2, column2, state=True)
+ row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True)
+ result = row.commit()
+ call_args = api.check_and_mutate_row.call_args
+ assert app_profile_id == call_args.app_profile_id[0]
+ assert result == expected_result
+ assert row._true_pb_mutations == []
+ assert row._false_pb_mutations == []
+
+
+def test_conditional_row_commit_too_many_mutations():
+ from google.cloud._testing import _Monkey
+ from google.cloud.bigtable import row as MUT
+
+ row_key = b"row_key"
+ table = object()
+ filter_ = object()
+ row = _make_conditional_row(row_key, table, filter_=filter_)
+ row._true_pb_mutations = [1, 2, 3]
+ num_mutations = len(row._true_pb_mutations)
+ with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1):
+ with pytest.raises(ValueError):
+ row.commit()
+
+
+def test_conditional_row_commit_no_mutations():
+ from tests.unit._testing import _FakeStub
+
+ project_id = "project-id"
+ row_key = b"row_key"
+
+ credentials = _make_credentials()
+ client = _make_client(project=project_id, credentials=credentials, admin=True)
+ table = _Table(None, client=client)
+ filter_ = object()
+ row = _make_conditional_row(row_key, table, filter_=filter_)
+ assert row._true_pb_mutations == []
+ assert row._false_pb_mutations == []
+
+ # Patch the stub used by the API method.
+ stub = _FakeStub()
+
+ # Perform the method and check the result.
+ result = row.commit()
+ assert result is None
+ # Make sure no request was sent.
+ assert stub.method_calls == []
+
+
+def _make_append_row(*args, **kwargs):
+ from google.cloud.bigtable.row import AppendRow
+
+ return AppendRow(*args, **kwargs)
+
+
+def test_append_row_constructor():
+ row_key = b"row_key"
+ table = object()
+
+ row = _make_append_row(row_key, table)
+ assert row._row_key == row_key
+ assert row._table is table
+ assert row._rule_pb_list == []
+
+
+def test_append_row_clear():
+ row_key = b"row_key"
+ table = object()
+ row = _make_append_row(row_key, table)
+ row._rule_pb_list = [1, 2, 3]
+ row.clear()
+ assert row._rule_pb_list == []
+
+
+def test_append_row_append_cell_value():
+ table = object()
+ row_key = b"row_key"
+ row = _make_append_row(row_key, table)
+ assert row._rule_pb_list == []
+
+ column = b"column"
+ column_family_id = u"column_family_id"
+ value = b"bytes-val"
+ row.append_cell_value(column_family_id, column, value)
+ expected_pb = _ReadModifyWriteRulePB(
+ family_name=column_family_id, column_qualifier=column, append_value=value
+ )
+ assert row._rule_pb_list == [expected_pb]
+
+
+def test_append_row_increment_cell_value():
+ table = object()
+ row_key = b"row_key"
+ row = _make_append_row(row_key, table)
+ assert row._rule_pb_list == []
+
+ column = b"column"
+ column_family_id = u"column_family_id"
+ int_value = 281330
+ row.increment_cell_value(column_family_id, column, int_value)
+ expected_pb = _ReadModifyWriteRulePB(
+ family_name=column_family_id,
+ column_qualifier=column,
+ increment_amount=int_value,
+ )
+ assert row._rule_pb_list == [expected_pb]
+
+
+def test_append_row_commit():
+ from google.cloud._testing import _Monkey
+ from google.cloud.bigtable import row as MUT
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+
+ project_id = "project-id"
+ row_key = b"row_key"
+ table_name = "projects/more-stuff"
+ app_profile_id = "app_profile_id"
+ column_family_id = u"column_family_id"
+ column = b"column"
+
+ api = mock.create_autospec(BigtableClient)
- microseconds = 5554441037
- timestamp = _datetime_from_microseconds(microseconds)
- expected_dict = {
+ credentials = _make_credentials()
+ client = _make_client(project=project_id, credentials=credentials, admin=True)
+ table = _Table(table_name, client=client, app_profile_id=app_profile_id)
+ row = _make_append_row(row_key, table)
+
+ # Create request_pb
+ value = b"bytes-value"
+
+ # Create expected_result.
+ row_responses = []
+ expected_result = object()
+
+ # Patch API calls
+ client._table_data_client = api
+
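+    # Test stand-in for row._parse_rmw_row_response: records the raw API
+    # response and returns a sentinel so commit() can be shown to route its
+    # result through the parser.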
+ def mock_parse_rmw_row_response(row_response):
+ row_responses.append(row_response)
+ return expected_result
+
+ # Perform the method and check the result.
+ with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response):
+ row._table._instance._client._table_data_client = api
+ row.append_cell_value(column_family_id, column, value)
+ result = row.commit()
+ call_args = api.read_modify_write_row.call_args_list[0]
+    # assumes commit() forwards app_profile_id as a keyword argument
+    assert app_profile_id == call_args.kwargs["app_profile_id"]
+ assert result == expected_result
+ assert row._rule_pb_list == []
+
+
+def test_append_row_commit_no_rules():
+ from tests.unit._testing import _FakeStub
+
+ project_id = "project-id"
+ row_key = b"row_key"
+
+ credentials = _make_credentials()
+ client = _make_client(project=project_id, credentials=credentials, admin=True)
+ table = _Table(None, client=client)
+ row = _make_append_row(row_key, table)
+ assert row._rule_pb_list == []
+
+ # Patch the stub used by the API method.
+ stub = _FakeStub()
+
+ # Perform the method and check the result.
+ result = row.commit()
+ assert result == {}
+ # Make sure no request was sent.
+ assert stub.method_calls == []
+
+
+def test_append_row_commit_too_many_mutations():
+ from google.cloud._testing import _Monkey
+ from google.cloud.bigtable import row as MUT
+
+ row_key = b"row_key"
+ table = object()
+ row = _make_append_row(row_key, table)
+ row._rule_pb_list = [1, 2, 3]
+ num_mutations = len(row._rule_pb_list)
+ with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1):
+ with pytest.raises(ValueError):
+ row.commit()
+
+
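+# _parse_rmw_row_response flattens a ReadModifyWriteRowResponse into a
+# nested dict of {family: {qualifier: [(value, timestamp), ...]}}, which is
+# the shape expected_output mirrors below.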
+def test__parse_rmw_row_response():
+ from google.cloud._helpers import _datetime_from_microseconds
+ from google.cloud.bigtable.row import _parse_rmw_row_response
+
+ col_fam1 = u"col-fam-id"
+ col_fam2 = u"col-fam-id2"
+ col_name1 = b"col-name1"
+ col_name2 = b"col-name2"
+ col_name3 = b"col-name3-but-other-fam"
+ cell_val1 = b"cell-val"
+ cell_val2 = b"cell-val-newer"
+ cell_val3 = b"altcol-cell-val"
+ cell_val4 = b"foo"
+
+ microseconds = 1000871
+ timestamp = _datetime_from_microseconds(microseconds)
+ expected_output = {
+ col_fam1: {
col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)],
col_name2: [(cell_val3, timestamp)],
- }
- expected_output = (col_fam1, expected_dict)
- sample_input = _FamilyPB(
- name=col_fam1,
- columns=[
- _ColumnPB(
- qualifier=col_name1,
- cells=[
- _CellPB(value=cell_val1, timestamp_micros=microseconds),
- _CellPB(value=cell_val2, timestamp_micros=microseconds),
- ],
- ),
- _ColumnPB(
- qualifier=col_name2,
- cells=[_CellPB(value=cell_val3, timestamp_micros=microseconds)],
- ),
- ],
- )
- self.assertEqual(expected_output, self._call_fut(sample_input))
+ },
+        col_fam2: {col_name3: [(cell_val4, timestamp)]},
+ }
+ response_row = _RowPB(
+ families=[
+ _FamilyPB(
+ name=col_fam1,
+ columns=[
+ _ColumnPB(
+ qualifier=col_name1,
+ cells=[
+ _CellPB(value=cell_val1, timestamp_micros=microseconds),
+ _CellPB(value=cell_val2, timestamp_micros=microseconds),
+ ],
+ ),
+ _ColumnPB(
+ qualifier=col_name2,
+ cells=[_CellPB(value=cell_val3, timestamp_micros=microseconds)],
+ ),
+ ],
+ ),
+ _FamilyPB(
+ name=col_fam2,
+ columns=[
+ _ColumnPB(
+ qualifier=col_name3,
+ cells=[_CellPB(value=cell_val4, timestamp_micros=microseconds)],
+ )
+ ],
+ ),
+ ]
+ )
+ sample_input = _ReadModifyWriteRowResponsePB(row=response_row)
+ assert expected_output == _parse_rmw_row_response(sample_input)
+
+
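+# _parse_family_pb handles a single Family message, returning a
+# (family_name, {qualifier: [(value, timestamp), ...]}) pair.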
+def test__parse_family_pb():
+ from google.cloud._helpers import _datetime_from_microseconds
+ from google.cloud.bigtable.row import _parse_family_pb
+
+ col_fam1 = u"col-fam-id"
+ col_name1 = b"col-name1"
+ col_name2 = b"col-name2"
+ cell_val1 = b"cell-val"
+ cell_val2 = b"cell-val-newer"
+ cell_val3 = b"altcol-cell-val"
+
+ microseconds = 5554441037
+ timestamp = _datetime_from_microseconds(microseconds)
+ expected_dict = {
+ col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)],
+ col_name2: [(cell_val3, timestamp)],
+ }
+ expected_output = (col_fam1, expected_dict)
+ sample_input = _FamilyPB(
+ name=col_fam1,
+ columns=[
+ _ColumnPB(
+ qualifier=col_name1,
+ cells=[
+ _CellPB(value=cell_val1, timestamp_micros=microseconds),
+ _CellPB(value=cell_val2, timestamp_micros=microseconds),
+ ],
+ ),
+ _ColumnPB(
+ qualifier=col_name2,
+ cells=[_CellPB(value=cell_val3, timestamp_micros=microseconds)],
+ ),
+ ],
+ )
+ assert expected_output == _parse_family_pb(sample_input)
def _CheckAndMutateRowResponsePB(*args, **kw):
diff --git a/tests/unit/test_row_data.py b/tests/unit/test_row_data.py
index a95cf2ec4..06fd2f016 100644
--- a/tests/unit/test_row_data.py
+++ b/tests/unit/test_row_data.py
@@ -13,1219 +13,1298 @@
# limitations under the License.
-import unittest
+import os
+
import mock
+import pytest
-from google.api_core.exceptions import DeadlineExceeded
from ._testing import _make_credentials
-from google.cloud.bigtable.row_set import RowRange
-from google.cloud.bigtable_v2.types import data as data_v2_pb2
-
-
-class TestCell(unittest.TestCase):
- timestamp_micros = 18738724000 # Make sure millis granularity
-
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_data import Cell
-
- return Cell
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def _from_pb_test_helper(self, labels=None):
- import datetime
- from google.cloud._helpers import _EPOCH
- from google.cloud.bigtable_v2.types import data as data_v2_pb2
-
- timestamp_micros = TestCell.timestamp_micros
- timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros)
- value = b"value-bytes"
-
- if labels is None:
- cell_pb = data_v2_pb2.Cell(value=value, timestamp_micros=timestamp_micros)
- cell_expected = self._make_one(value, timestamp_micros)
- else:
- cell_pb = data_v2_pb2.Cell(
- value=value, timestamp_micros=timestamp_micros, labels=labels
- )
- cell_expected = self._make_one(value, timestamp_micros, labels=labels)
-
- klass = self._get_target_class()
- result = klass.from_pb(cell_pb)
- self.assertEqual(result, cell_expected)
- self.assertEqual(result.timestamp, timestamp)
-
- def test_from_pb(self):
- self._from_pb_test_helper()
-
- def test_from_pb_with_labels(self):
- labels = [u"label1", u"label2"]
- self._from_pb_test_helper(labels)
-
- def test_constructor(self):
- value = object()
- cell = self._make_one(value, TestCell.timestamp_micros)
- self.assertEqual(cell.value, value)
-
- def test___eq__(self):
- value = object()
- cell1 = self._make_one(value, TestCell.timestamp_micros)
- cell2 = self._make_one(value, TestCell.timestamp_micros)
- self.assertEqual(cell1, cell2)
-
- def test___eq__type_differ(self):
- cell1 = self._make_one(None, None)
- cell2 = object()
- self.assertNotEqual(cell1, cell2)
-
- def test___ne__same_value(self):
- value = object()
- cell1 = self._make_one(value, TestCell.timestamp_micros)
- cell2 = self._make_one(value, TestCell.timestamp_micros)
- comparison_val = cell1 != cell2
- self.assertFalse(comparison_val)
-
- def test___ne__(self):
- value1 = "value1"
- value2 = "value2"
- cell1 = self._make_one(value1, TestCell.timestamp_micros)
- cell2 = self._make_one(value2, TestCell.timestamp_micros)
- self.assertNotEqual(cell1, cell2)
-
-
-class TestPartialRowData(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_data import PartialRowData
-
- return PartialRowData
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_constructor(self):
- row_key = object()
- partial_row_data = self._make_one(row_key)
- self.assertIs(partial_row_data._row_key, row_key)
- self.assertEqual(partial_row_data._cells, {})
-
- def test___eq__(self):
- row_key = object()
- partial_row_data1 = self._make_one(row_key)
- partial_row_data2 = self._make_one(row_key)
- self.assertEqual(partial_row_data1, partial_row_data2)
-
- def test___eq__type_differ(self):
- partial_row_data1 = self._make_one(None)
- partial_row_data2 = object()
- self.assertNotEqual(partial_row_data1, partial_row_data2)
-
- def test___ne__same_value(self):
- row_key = object()
- partial_row_data1 = self._make_one(row_key)
- partial_row_data2 = self._make_one(row_key)
- comparison_val = partial_row_data1 != partial_row_data2
- self.assertFalse(comparison_val)
-
- def test___ne__(self):
- row_key1 = object()
- partial_row_data1 = self._make_one(row_key1)
- row_key2 = object()
- partial_row_data2 = self._make_one(row_key2)
- self.assertNotEqual(partial_row_data1, partial_row_data2)
-
- def test___ne__cells(self):
- row_key = object()
- partial_row_data1 = self._make_one(row_key)
- partial_row_data1._cells = object()
- partial_row_data2 = self._make_one(row_key)
- self.assertNotEqual(partial_row_data1, partial_row_data2)
-
- def test_to_dict(self):
- cell1 = object()
- cell2 = object()
- cell3 = object()
-
- family_name1 = u"name1"
- family_name2 = u"name2"
- qual1 = b"col1"
- qual2 = b"col2"
- qual3 = b"col3"
-
- partial_row_data = self._make_one(None)
- partial_row_data._cells = {
- family_name1: {qual1: cell1, qual2: cell2},
- family_name2: {qual3: cell3},
- }
-
- result = partial_row_data.to_dict()
- expected_result = {
- b"name1:col1": cell1,
- b"name1:col2": cell2,
- b"name2:col3": cell3,
- }
- self.assertEqual(result, expected_result)
-
- def test_cell_value(self):
- family_name = u"name1"
- qualifier = b"col1"
- cell = _make_cell(b"value-bytes")
-
- partial_row_data = self._make_one(None)
- partial_row_data._cells = {family_name: {qualifier: [cell]}}
-
- result = partial_row_data.cell_value(family_name, qualifier)
- self.assertEqual(result, cell.value)
-
- def test_cell_value_invalid_index(self):
- family_name = u"name1"
- qualifier = b"col1"
- cell = _make_cell(b"")
-
- partial_row_data = self._make_one(None)
- partial_row_data._cells = {family_name: {qualifier: [cell]}}
-
- with self.assertRaises(IndexError):
- partial_row_data.cell_value(family_name, qualifier, index=None)
-
- def test_cell_value_invalid_column_family_key(self):
- family_name = u"name1"
- qualifier = b"col1"
-
- partial_row_data = self._make_one(None)
-
- with self.assertRaises(KeyError):
- partial_row_data.cell_value(family_name, qualifier)
-
- def test_cell_value_invalid_column_key(self):
- family_name = u"name1"
- qualifier = b"col1"
-
- partial_row_data = self._make_one(None)
- partial_row_data._cells = {family_name: {}}
-
- with self.assertRaises(KeyError):
- partial_row_data.cell_value(family_name, qualifier)
-
- def test_cell_values(self):
- family_name = u"name1"
- qualifier = b"col1"
- cell = _make_cell(b"value-bytes")
-
- partial_row_data = self._make_one(None)
- partial_row_data._cells = {family_name: {qualifier: [cell]}}
-
- values = []
- for value, timestamp_micros in partial_row_data.cell_values(
- family_name, qualifier
- ):
- values.append(value)
-
- self.assertEqual(values[0], cell.value)
-
- def test_cell_values_with_max_count(self):
- family_name = u"name1"
- qualifier = b"col1"
- cell_1 = _make_cell(b"value-bytes-1")
- cell_2 = _make_cell(b"value-bytes-2")
-
- partial_row_data = self._make_one(None)
- partial_row_data._cells = {family_name: {qualifier: [cell_1, cell_2]}}
-
- values = []
- for value, timestamp_micros in partial_row_data.cell_values(
- family_name, qualifier, max_count=1
- ):
- values.append(value)
-
- self.assertEqual(1, len(values))
- self.assertEqual(values[0], cell_1.value)
- def test_cells_property(self):
- partial_row_data = self._make_one(None)
- cells = {1: 2}
- partial_row_data._cells = cells
- self.assertEqual(partial_row_data.cells, cells)
+TIMESTAMP_MICROS = 18738724000  # large enough to exercise millis granularity
+ROW_KEY = b"row-key"
+FAMILY_NAME = u"family"
+QUALIFIER = b"qualifier"
+VALUE = b"value"
+TABLE_NAME = "table_name"
- def test_row_key_getter(self):
- row_key = object()
- partial_row_data = self._make_one(row_key)
- self.assertIs(partial_row_data.row_key, row_key)
+def _make_cell(*args, **kwargs):
+ from google.cloud.bigtable.row_data import Cell
-class _Client(object):
+ return Cell(*args, **kwargs)
- data_stub = None
+def _cell_from_pb_test_helper(labels=None):
+ import datetime
+ from google.cloud._helpers import _EPOCH
+ from google.cloud.bigtable_v2.types import data as data_v2_pb2
+ from google.cloud.bigtable.row_data import Cell
-class Test_retry_read_rows_exception(unittest.TestCase):
- @staticmethod
- def _call_fut(exc):
- from google.cloud.bigtable.row_data import _retry_read_rows_exception
+ timestamp = _EPOCH + datetime.timedelta(microseconds=TIMESTAMP_MICROS)
+ value = b"value-bytes"
- return _retry_read_rows_exception(exc)
+ if labels is None:
+ cell_pb = data_v2_pb2.Cell(value=value, timestamp_micros=TIMESTAMP_MICROS)
+ cell_expected = _make_cell(value, TIMESTAMP_MICROS)
+ else:
+ cell_pb = data_v2_pb2.Cell(
+ value=value, timestamp_micros=TIMESTAMP_MICROS, labels=labels
+ )
+ cell_expected = _make_cell(value, TIMESTAMP_MICROS, labels=labels)
- @staticmethod
- def _make_grpc_call_error(exception):
- from grpc import Call
- from grpc import RpcError
+ result = Cell.from_pb(cell_pb)
- class TestingException(Call, RpcError):
- def __init__(self, exception):
- self.exception = exception
+ assert result == cell_expected
+ assert result.timestamp == timestamp
- def code(self):
- return self.exception.grpc_status_code
- def details(self):
- return "Testing"
+def test_cell_from_pb():
+ _cell_from_pb_test_helper()
- return TestingException(exception)
- def test_w_miss(self):
- from google.api_core.exceptions import Conflict
+def test_cell_from_pb_with_labels():
+ labels = [u"label1", u"label2"]
+ _cell_from_pb_test_helper(labels)
- exception = Conflict("testing")
- self.assertFalse(self._call_fut(exception))
- def test_w_service_unavailable(self):
- from google.api_core.exceptions import ServiceUnavailable
+def test_cell_constructor():
+ value = object()
+ cell = _make_cell(value, TIMESTAMP_MICROS)
+ assert cell.value == value
- exception = ServiceUnavailable("testing")
- self.assertTrue(self._call_fut(exception))
- def test_w_deadline_exceeded(self):
- from google.api_core.exceptions import DeadlineExceeded
+def test_cell___eq__():
+ value = object()
+ cell1 = _make_cell(value, TIMESTAMP_MICROS)
+ cell2 = _make_cell(value, TIMESTAMP_MICROS)
+ assert cell1 == cell2
- exception = DeadlineExceeded("testing")
- self.assertTrue(self._call_fut(exception))
- def test_w_miss_wrapped_in_grpc(self):
- from google.api_core.exceptions import Conflict
+def test_cell___eq__type_differ():
+ cell1 = _make_cell(None, None)
+ cell2 = object()
+ assert not (cell1 == cell2)
- wrapped = Conflict("testing")
- exception = self._make_grpc_call_error(wrapped)
- self.assertFalse(self._call_fut(exception))
- def test_w_service_unavailable_wrapped_in_grpc(self):
- from google.api_core.exceptions import ServiceUnavailable
+def test_cell___ne__same_value():
+ value = object()
+ cell1 = _make_cell(value, TIMESTAMP_MICROS)
+ cell2 = _make_cell(value, TIMESTAMP_MICROS)
+ assert not (cell1 != cell2)
- wrapped = ServiceUnavailable("testing")
- exception = self._make_grpc_call_error(wrapped)
- self.assertTrue(self._call_fut(exception))
- def test_w_deadline_exceeded_wrapped_in_grpc(self):
- from google.api_core.exceptions import DeadlineExceeded
+def test_cell___ne__():
+ value1 = "value1"
+ value2 = "value2"
+ cell1 = _make_cell(value1, TIMESTAMP_MICROS)
+ cell2 = _make_cell(value2, TIMESTAMP_MICROS)
+ assert cell1 != cell2
+
+
+def _make_partial_row_data(*args, **kwargs):
+ from google.cloud.bigtable.row_data import PartialRowData
+
+ return PartialRowData(*args, **kwargs)
+
+
+def test_partial_row_data_constructor():
+ row_key = object()
+ partial_row_data = _make_partial_row_data(row_key)
+ assert partial_row_data._row_key is row_key
+ assert partial_row_data._cells == {}
+
+
+def test_partial_row_data___eq__():
+ row_key = object()
+ partial_row_data1 = _make_partial_row_data(row_key)
+ partial_row_data2 = _make_partial_row_data(row_key)
+ assert partial_row_data1 == partial_row_data2
+
+
+def test_partial_row_data___eq__type_differ():
+ partial_row_data1 = _make_partial_row_data(None)
+ partial_row_data2 = object()
+ assert not (partial_row_data1 == partial_row_data2)
+
+
+def test_partial_row_data___ne__same_value():
+ row_key = object()
+ partial_row_data1 = _make_partial_row_data(row_key)
+ partial_row_data2 = _make_partial_row_data(row_key)
+ assert not (partial_row_data1 != partial_row_data2)
+
+
+def test_partial_row_data___ne__():
+ row_key1 = object()
+ partial_row_data1 = _make_partial_row_data(row_key1)
+ row_key2 = object()
+ partial_row_data2 = _make_partial_row_data(row_key2)
+ assert partial_row_data1 != partial_row_data2
+
+
+def test_partial_row_data___ne__cells():
+ row_key = object()
+ partial_row_data1 = _make_partial_row_data(row_key)
+ partial_row_data1._cells = object()
+ partial_row_data2 = _make_partial_row_data(row_key)
+ assert partial_row_data1 != partial_row_data2
+
+
+def test_partial_row_data_to_dict():
+ cell1 = object()
+ cell2 = object()
+ cell3 = object()
+
+ family_name1 = u"name1"
+ family_name2 = u"name2"
+ qual1 = b"col1"
+ qual2 = b"col2"
+ qual3 = b"col3"
+
+ partial_row_data = _make_partial_row_data(None)
+ partial_row_data._cells = {
+ family_name1: {qual1: cell1, qual2: cell2},
+ family_name2: {qual3: cell3},
+ }
+
+ result = partial_row_data.to_dict()
+ expected_result = {
+ b"name1:col1": cell1,
+ b"name1:col2": cell2,
+ b"name2:col3": cell3,
+ }
+ assert result == expected_result
+
+
+def test_partial_row_data_cell_value():
+ family_name = u"name1"
+ qualifier = b"col1"
+ cell = _make_cell_pb(b"value-bytes")
+
+ partial_row_data = _make_partial_row_data(None)
+ partial_row_data._cells = {family_name: {qualifier: [cell]}}
+
+ result = partial_row_data.cell_value(family_name, qualifier)
+ assert result == cell.value
+
+
+def test_partial_row_data_cell_value_invalid_index():
+ family_name = u"name1"
+ qualifier = b"col1"
+ cell = _make_cell_pb(b"")
+
+ partial_row_data = _make_partial_row_data(None)
+ partial_row_data._cells = {family_name: {qualifier: [cell]}}
+
+ with pytest.raises(IndexError):
+ partial_row_data.cell_value(family_name, qualifier, index=None)
+
+
+def test_partial_row_data_cell_value_invalid_column_family_key():
+ family_name = u"name1"
+ qualifier = b"col1"
+
+ partial_row_data = _make_partial_row_data(None)
+
+ with pytest.raises(KeyError):
+ partial_row_data.cell_value(family_name, qualifier)
+
+
+def test_partial_row_data_cell_value_invalid_column_key():
+ family_name = u"name1"
+ qualifier = b"col1"
+
+ partial_row_data = _make_partial_row_data(None)
+ partial_row_data._cells = {family_name: {}}
+
+ with pytest.raises(KeyError):
+ partial_row_data.cell_value(family_name, qualifier)
+
+
+def test_partial_row_data_cell_values():
+ family_name = u"name1"
+ qualifier = b"col1"
+ cell = _make_cell_pb(b"value-bytes")
+
+ partial_row_data = _make_partial_row_data(None)
+ partial_row_data._cells = {family_name: {qualifier: [cell]}}
+
+ values = []
+ for value, timestamp_micros in partial_row_data.cell_values(family_name, qualifier):
+ values.append(value)
+
+ assert values[0] == cell.value
+
+
+def test_partial_row_data_cell_values_with_max_count():
+ family_name = u"name1"
+ qualifier = b"col1"
+ cell_1 = _make_cell_pb(b"value-bytes-1")
+ cell_2 = _make_cell_pb(b"value-bytes-2")
+
+ partial_row_data = _make_partial_row_data(None)
+ partial_row_data._cells = {family_name: {qualifier: [cell_1, cell_2]}}
+
+ values = []
+ for value, timestamp_micros in partial_row_data.cell_values(
+ family_name, qualifier, max_count=1
+ ):
+ values.append(value)
+
+ assert 1 == len(values)
+ assert values[0] == cell_1.value
+
+
+def test_partial_row_data_cells_property():
+ partial_row_data = _make_partial_row_data(None)
+ cells = {1: 2}
+ partial_row_data._cells = cells
+ assert partial_row_data.cells == cells
+
+
+def test_partial_row_data_row_key_getter():
+ row_key = object()
+ partial_row_data = _make_partial_row_data(row_key)
+ assert partial_row_data.row_key is row_key
+
+
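+# Wraps an api_core exception in a class deriving from grpc.Call and
+# grpc.RpcError so the _retry_read_rows_exception tests can verify that
+# wrapped gRPC errors are unwrapped before the predicate is applied.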
+def _make_grpc_call_error(exception):
+ from grpc import Call
+ from grpc import RpcError
+
+ class TestingException(Call, RpcError):
+ def __init__(self, exception):
+ self.exception = exception
+
+ def code(self):
+ return self.exception.grpc_status_code
+
+ def details(self):
+ return "Testing"
+
+ def trailing_metadata(self):
+ return None
+
+ return TestingException(exception)
+
+
+def test__retry_read_rows_exception_miss():
+ from google.api_core.exceptions import Conflict
+ from google.cloud.bigtable.row_data import _retry_read_rows_exception
+
+ exception = Conflict("testing")
+ assert not _retry_read_rows_exception(exception)
+
+
+def test__retry_read_rows_exception_service_unavailable():
+ from google.api_core.exceptions import ServiceUnavailable
+ from google.cloud.bigtable.row_data import _retry_read_rows_exception
+
+ exception = ServiceUnavailable("testing")
+ assert _retry_read_rows_exception(exception)
+
+
+def test__retry_read_rows_exception_deadline_exceeded():
+ from google.api_core.exceptions import DeadlineExceeded
+ from google.cloud.bigtable.row_data import _retry_read_rows_exception
+
+ exception = DeadlineExceeded("testing")
+ assert _retry_read_rows_exception(exception)
+
+
+def test__retry_read_rows_exception_miss_wrapped_in_grpc():
+ from google.api_core.exceptions import Conflict
+ from google.cloud.bigtable.row_data import _retry_read_rows_exception
+
+ wrapped = Conflict("testing")
+ exception = _make_grpc_call_error(wrapped)
+ assert not _retry_read_rows_exception(exception)
+
+
+def test__retry_read_rows_exception_service_unavailable_wrapped_in_grpc():
+ from google.api_core.exceptions import ServiceUnavailable
+ from google.cloud.bigtable.row_data import _retry_read_rows_exception
+
+ wrapped = ServiceUnavailable("testing")
+ exception = _make_grpc_call_error(wrapped)
+ assert _retry_read_rows_exception(exception)
+
+
+def test__retry_read_rows_exception_deadline_exceeded_wrapped_in_grpc():
+ from google.api_core.exceptions import DeadlineExceeded
+ from google.cloud.bigtable.row_data import _retry_read_rows_exception
+
+ wrapped = DeadlineExceeded("testing")
+ exception = _make_grpc_call_error(wrapped)
+ assert _retry_read_rows_exception(exception)
+
+
+def _make_partial_rows_data(*args, **kwargs):
+ from google.cloud.bigtable.row_data import PartialRowsData
+
+ return PartialRowsData(*args, **kwargs)
+
+
+def _partial_rows_data_consume_all(yrd):
+ return [row.row_key for row in yrd]
+
+
+def _make_client(*args, **kwargs):
+ from google.cloud.bigtable.client import Client
+
+ return Client(*args, **kwargs)
+
+
+def test_partial_rows_data_constructor():
+ from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS
+
+ client = _Client()
+ client._data_stub = mock.MagicMock()
+ request = object()
+ partial_rows_data = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ assert partial_rows_data.request is request
+ assert partial_rows_data.rows == {}
+ assert partial_rows_data.retry == DEFAULT_RETRY_READ_ROWS
+
+
+def test_partial_rows_data_constructor_with_retry():
+ from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS
+
+ client = _Client()
+ client._data_stub = mock.MagicMock()
+ request = object()
+ retry = DEFAULT_RETRY_READ_ROWS
+ partial_rows_data = _make_partial_rows_data(
+ client._data_stub.ReadRows, request, retry
+ )
+ partial_rows_data.read_method.assert_called_once_with(
+ request, timeout=DEFAULT_RETRY_READ_ROWS.deadline + 1
+ )
+ assert partial_rows_data.request is request
+ assert partial_rows_data.rows == {}
+ assert partial_rows_data.retry == retry
+
+
+def test_partial_rows_data___eq__():
+ client = _Client()
+ client._data_stub = mock.MagicMock()
+ request = object()
+ partial_rows_data1 = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ partial_rows_data2 = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ assert partial_rows_data1.rows == partial_rows_data2.rows
+
+
+def test_partial_rows_data___eq__type_differ():
+ client = _Client()
+ client._data_stub = mock.MagicMock()
+ request = object()
+ partial_rows_data1 = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ partial_rows_data2 = object()
+ assert not (partial_rows_data1 == partial_rows_data2)
- wrapped = DeadlineExceeded("testing")
- exception = self._make_grpc_call_error(wrapped)
- self.assertTrue(self._call_fut(exception))
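+# Note: PartialRowsData appears not to define __eq__, so two instances built
+# from the same request still compare unequal, as the old assertTrue on `!=`
+# also expected.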
+def test_partial_rows_data___ne__same_value():
+ client = _Client()
+ client._data_stub = mock.MagicMock()
+ request = object()
+ partial_rows_data1 = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ partial_rows_data2 = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ assert partial_rows_data1 != partial_rows_data2
-class TestPartialRowsData(unittest.TestCase):
- ROW_KEY = b"row-key"
- FAMILY_NAME = u"family"
- QUALIFIER = b"qualifier"
+
+def test_partial_rows_data___ne__():
+ client = _Client()
+ client._data_stub = mock.MagicMock()
+ request = object()
+ partial_rows_data1 = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ partial_rows_data2 = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ assert partial_rows_data1 != partial_rows_data2
+
+
+def test_partial_rows_data_rows_getter():
+ client = _Client()
+ client._data_stub = mock.MagicMock()
+ request = object()
+ partial_rows_data = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ partial_rows_data.rows = value = object()
+ assert partial_rows_data.rows is value
+
+
+def test_partial_rows_data_state_start():
+ client = _Client()
+ iterator = _MockCancellableIterator()
+ client._data_stub = mock.MagicMock()
+ client._data_stub.ReadRows.side_effect = [iterator]
+ request = object()
+ yrd = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ assert yrd.state == yrd.NEW_ROW
+
+
+def test_partial_rows_data_state_new_row_w_row():
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+
+ chunk = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
+ chunks = [chunk]
+
+ response = _ReadRowsResponseV2(chunks)
+ iterator = _MockCancellableIterator(response)
+
+ data_api = mock.create_autospec(BigtableClient)
+
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ client._table_data_client = data_api
+ request = object()
+
+ yrd = _make_partial_rows_data(client._table_data_client.read_rows, request)
+ assert yrd.retry._deadline == 60.0
+
+ yrd.response_iterator = iterator
+ rows = [row for row in yrd]
+
+ result = rows[0]
+ assert result.row_key == ROW_KEY
+ assert yrd._counter == 1
+ assert yrd.state == yrd.NEW_ROW
+
+
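+# chunk2 carries no row key, so it inherits the row begun by chunk1; the
+# commit_row flag on chunk2 then closes it, yielding a single row.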
+def test_partial_rows_data_multiple_chunks():
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+
+ chunk1 = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=False,
+ )
+ chunk2 = _ReadRowsResponseCellChunkPB(
+ qualifier=QUALIFIER + b"1",
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
+ chunks = [chunk1, chunk2]
+
+ response = _ReadRowsResponseV2(chunks)
+ iterator = _MockCancellableIterator(response)
+ data_api = mock.create_autospec(BigtableClient)
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ client._table_data_client = data_api
+ request = object()
+
+ yrd = _make_partial_rows_data(data_api.read_rows, request)
+
+ yrd.response_iterator = iterator
+ rows = [row for row in yrd]
+ result = rows[0]
+ assert result.row_key == ROW_KEY
+ assert yrd._counter == 1
+ assert yrd.state == yrd.NEW_ROW
+
+
+def test_partial_rows_data_cancel():
+ client = _Client()
+ response_iterator = _MockCancellableIterator()
+ client._data_stub = mock.MagicMock()
+ client._data_stub.ReadRows.side_effect = [response_iterator]
+ request = object()
+ yield_rows_data = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ assert response_iterator.cancel_calls == 0
+ yield_rows_data.cancel()
+ assert response_iterator.cancel_calls == 1
+ assert list(yield_rows_data) == []
+
+
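+# Cancelling from inside the iteration loop should propagate to the
+# underlying response iterator exactly once and leave nothing further to
+# consume.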
+def test_partial_rows_data_cancel_between_chunks():
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+
+ chunk1 = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
+ chunk2 = _ReadRowsResponseCellChunkPB(
+ qualifier=QUALIFIER + b"1",
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
+ chunks = [chunk1, chunk2]
+ response = _ReadRowsResponseV2(chunks)
+ response_iterator = _MockCancellableIterator(response)
+
+ client = _Client()
+ data_api = mock.create_autospec(BigtableClient)
+ client._table_data_client = data_api
+ request = object()
+ yrd = _make_partial_rows_data(data_api.read_rows, request)
+ yrd.response_iterator = response_iterator
+
+ rows = []
+ for row in yrd:
+ yrd.cancel()
+ rows.append(row)
+
+ assert response_iterator.cancel_calls == 1
+ assert list(yrd) == []
+
+
+# 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests'
+
+
+def test_partial_rows_data__copy_from_previous_unset():
+ client = _Client()
+ client._data_stub = mock.MagicMock()
+ request = object()
+ yrd = _make_partial_rows_data(client._data_stub.read_rows, request)
+ cell = _PartialCellData()
+ yrd._copy_from_previous(cell)
+ assert cell.row_key == b""
+ assert cell.family_name == u""
+ assert cell.qualifier is None
+ assert cell.timestamp_micros == 0
+ assert cell.labels == []
+
+
+def test_partial_rows_data__copy_from_previous_blank():
+ ROW_KEY = "RK"
+ FAMILY_NAME = u"A"
+ QUALIFIER = b"C"
+ TIMESTAMP_MICROS = 100
+ LABELS = ["L1", "L2"]
+ client = _Client()
+ client._data_stub = mock.MagicMock()
+ request = object()
+ yrd = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ cell = _PartialCellData(
+ row_key=ROW_KEY,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ labels=LABELS,
+ )
+ yrd._previous_cell = _PartialCellData()
+ yrd._copy_from_previous(cell)
+ assert cell.row_key == ROW_KEY
+ assert cell.family_name == FAMILY_NAME
+ assert cell.qualifier == QUALIFIER
+ assert cell.timestamp_micros == TIMESTAMP_MICROS
+ assert cell.labels == LABELS
+
+
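+# With a fully populated previous cell, only row_key, family_name and
+# qualifier carry over to the new cell; timestamp_micros and labels reset.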
+def test_partial_rows_data__copy_from_previous_filled():
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+
+ ROW_KEY = "RK"
+ FAMILY_NAME = u"A"
+ QUALIFIER = b"C"
TIMESTAMP_MICROS = 100
- VALUE = b"value"
+ LABELS = ["L1", "L2"]
+ client = _Client()
+ data_api = mock.create_autospec(BigtableClient)
+ client._data_stub = data_api
+ request = object()
+ yrd = _make_partial_rows_data(client._data_stub.read_rows, request)
+ yrd._previous_cell = _PartialCellData(
+ row_key=ROW_KEY,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ labels=LABELS,
+ )
+ cell = _PartialCellData()
+ yrd._copy_from_previous(cell)
+ assert cell.row_key == ROW_KEY
+ assert cell.family_name == FAMILY_NAME
+ assert cell.qualifier == QUALIFIER
+ assert cell.timestamp_micros == 0
+ assert cell.labels == []
+
+
+def test_partial_rows_data_valid_last_scanned_row_key_on_start():
+ client = _Client()
+ response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key="2.AFTER")
+ iterator = _MockCancellableIterator(response)
+ client._data_stub = mock.MagicMock()
+ client._data_stub.read_rows.side_effect = [iterator]
+ request = object()
+ yrd = _make_partial_rows_data(client._data_stub.read_rows, request)
+ yrd.last_scanned_row_key = "1.BEFORE"
+ _partial_rows_data_consume_all(yrd)
+ assert yrd.last_scanned_row_key == "2.AFTER"
+
+
+def test_partial_rows_data_invalid_empty_chunk():
+ from google.cloud.bigtable.row_data import InvalidChunk
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+
+ client = _Client()
+ chunks = _generate_cell_chunks([""])
+ response = _ReadRowsResponseV2(chunks)
+ iterator = _MockCancellableIterator(response)
+ client._data_stub = mock.create_autospec(BigtableClient)
+ client._data_stub.read_rows.side_effect = [iterator]
+ request = object()
+ yrd = _make_partial_rows_data(client._data_stub.read_rows, request)
+ with pytest.raises(InvalidChunk):
+ _partial_rows_data_consume_all(yrd)
+
+
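+# A follow-up chunk carrying only a value extends the in-progress cell, so
+# the accumulated value is VALUE + VALUE.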
+def test_partial_rows_data_state_cell_in_progress():
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+
+ LABELS = ["L1", "L2"]
+
+ request = object()
+ client = _Client()
+ client._data_stub = mock.create_autospec(BigtableClient)
+ yrd = _make_partial_rows_data(client._data_stub.read_rows, request)
+
+ chunk = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ labels=LABELS,
+ )
+ yrd._update_cell(chunk)
+
+ more_cell_data = _ReadRowsResponseCellChunkPB(value=VALUE)
+ yrd._update_cell(more_cell_data)
+
+ assert yrd._cell.row_key == ROW_KEY
+ assert yrd._cell.family_name == FAMILY_NAME
+ assert yrd._cell.qualifier == QUALIFIER
+ assert yrd._cell.timestamp_micros == TIMESTAMP_MICROS
+ assert yrd._cell.labels == LABELS
+ assert yrd._cell.value == VALUE + VALUE
+
+
+def test_partial_rows_data_yield_rows_data():
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+
+ client = _Client()
+
+ chunk = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
+ chunks = [chunk]
+
+ response = _ReadRowsResponseV2(chunks)
+ iterator = _MockCancellableIterator(response)
+ data_api = mock.create_autospec(BigtableClient)
+ client._data_stub = data_api
+ client._data_stub.read_rows.side_effect = [iterator]
+
+ request = object()
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_data import PartialRowsData
+ yrd = _make_partial_rows_data(client._data_stub.read_rows, request)
- return PartialRowsData
+ result = _partial_rows_data_consume_all(yrd)[0]
- @staticmethod
- def _get_target_client_class():
- from google.cloud.bigtable.client import Client
+ assert result == ROW_KEY
- return Client
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
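+# The first iterator fails with an error matching the retry predicate; the
+# Retry wrapper should re-issue the request transparently and the row still
+# arrives via the second iterator.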
+def test_partial_rows_data_yield_retry_rows_data():
+ from google.api_core import retry
- def test_constructor(self):
- from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS
+ client = _Client()
- client = _Client()
- client._data_stub = mock.MagicMock()
- request = object()
- partial_rows_data = self._make_one(client._data_stub.ReadRows, request)
- self.assertIs(partial_rows_data.request, request)
- self.assertEqual(partial_rows_data.rows, {})
- self.assertEqual(partial_rows_data.retry, DEFAULT_RETRY_READ_ROWS)
+ retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception)
- def test_constructor_with_retry(self):
- from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS
+ chunk = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
+ chunks = [chunk]
- client = _Client()
- client._data_stub = mock.MagicMock()
- request = object()
- retry = DEFAULT_RETRY_READ_ROWS
- partial_rows_data = self._make_one(client._data_stub.ReadRows, request, retry)
- partial_rows_data.read_method.assert_called_once_with(
- request, timeout=DEFAULT_RETRY_READ_ROWS.deadline + 1
- )
- self.assertIs(partial_rows_data.request, request)
- self.assertEqual(partial_rows_data.rows, {})
- self.assertEqual(partial_rows_data.retry, retry)
-
- def test___eq__(self):
- client = _Client()
- client._data_stub = mock.MagicMock()
- request = object()
- partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request)
- partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request)
- self.assertEqual(partial_rows_data1.rows, partial_rows_data2.rows)
-
- def test___eq__type_differ(self):
- client = _Client()
- client._data_stub = mock.MagicMock()
- request = object()
- partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request)
- partial_rows_data2 = object()
- self.assertNotEqual(partial_rows_data1, partial_rows_data2)
-
- def test___ne__same_value(self):
- client = _Client()
- client._data_stub = mock.MagicMock()
- request = object()
- partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request)
- partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request)
- comparison_val = partial_rows_data1 != partial_rows_data2
- self.assertTrue(comparison_val)
-
- def test___ne__(self):
- client = _Client()
- client._data_stub = mock.MagicMock()
- request = object()
- partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request)
- partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request)
- self.assertNotEqual(partial_rows_data1, partial_rows_data2)
-
- def test_rows_getter(self):
- client = _Client()
- client._data_stub = mock.MagicMock()
- request = object()
- partial_rows_data = self._make_one(client._data_stub.ReadRows, request)
- partial_rows_data.rows = value = object()
- self.assertIs(partial_rows_data.rows, value)
-
- def _make_client(self, *args, **kwargs):
- return self._get_target_client_class()(*args, **kwargs)
-
- def test_state_start(self):
- client = _Client()
- iterator = _MockCancellableIterator()
- client._data_stub = mock.MagicMock()
- client._data_stub.ReadRows.side_effect = [iterator]
- request = object()
- yrd = self._make_one(client._data_stub.ReadRows, request)
- self.assertEqual(yrd.state, yrd.NEW_ROW)
-
- def test_state_new_row_w_row(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
-
- chunk = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
- chunks = [chunk]
+ response = _ReadRowsResponseV2(chunks)
+ failure_iterator = _MockFailureIterator_1()
+ iterator = _MockCancellableIterator(response)
+ client._data_stub = mock.MagicMock()
+ client._data_stub.ReadRows.side_effect = [failure_iterator, iterator]
- response = _ReadRowsResponseV2(chunks)
- iterator = _MockCancellableIterator(response)
+ request = object()
- data_api = mock.create_autospec(BigtableClient)
+ yrd = _make_partial_rows_data(client._data_stub.ReadRows, request, retry_read_rows)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- request = object()
-
- yrd = self._make_one(client._table_data_client.read_rows, request)
- self.assertEqual(yrd.retry._deadline, 60.0)
-
- yrd.response_iterator = iterator
- rows = [row for row in yrd]
-
- result = rows[0]
- self.assertEqual(result.row_key, self.ROW_KEY)
- self.assertEqual(yrd._counter, 1)
- self.assertEqual(yrd.state, yrd.NEW_ROW)
-
- def test_multiple_chunks(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
-
- chunk1 = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=False,
- )
- chunk2 = _ReadRowsResponseCellChunkPB(
- qualifier=self.QUALIFIER + b"1",
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
- chunks = [chunk1, chunk2]
-
- response = _ReadRowsResponseV2(chunks)
- iterator = _MockCancellableIterator(response)
- data_api = mock.create_autospec(BigtableClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- request = object()
-
- yrd = self._make_one(data_api.read_rows, request)
-
- yrd.response_iterator = iterator
- rows = [row for row in yrd]
- result = rows[0]
- self.assertEqual(result.row_key, self.ROW_KEY)
- self.assertEqual(yrd._counter, 1)
- self.assertEqual(yrd.state, yrd.NEW_ROW)
-
- def test_cancel(self):
- client = _Client()
- response_iterator = _MockCancellableIterator()
- client._data_stub = mock.MagicMock()
- client._data_stub.ReadRows.side_effect = [response_iterator]
- request = object()
- yield_rows_data = self._make_one(client._data_stub.ReadRows, request)
- self.assertEqual(response_iterator.cancel_calls, 0)
- yield_rows_data.cancel()
- self.assertEqual(response_iterator.cancel_calls, 1)
- self.assertEqual(list(yield_rows_data), [])
-
- def test_cancel_between_chunks(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
-
- chunk1 = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
- chunk2 = _ReadRowsResponseCellChunkPB(
- qualifier=self.QUALIFIER + b"1",
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
- chunks = [chunk1, chunk2]
- response = _ReadRowsResponseV2(chunks)
- response_iterator = _MockCancellableIterator(response)
-
- client = _Client()
- data_api = mock.create_autospec(BigtableClient)
- client._table_data_client = data_api
- request = object()
- yrd = self._make_one(data_api.read_rows, request)
- yrd.response_iterator = response_iterator
-
- rows = []
- for row in yrd:
- yrd.cancel()
- rows.append(row)
-
- self.assertEqual(response_iterator.cancel_calls, 1)
- self.assertEqual(list(yrd), [])
-
- # 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests'
-
- def test__copy_from_previous_unset(self):
- client = _Client()
- client._data_stub = mock.MagicMock()
- request = object()
- yrd = self._make_one(client._data_stub.read_rows, request)
- cell = _PartialCellData()
- yrd._copy_from_previous(cell)
- self.assertEqual(cell.row_key, b"")
- self.assertEqual(cell.family_name, u"")
- self.assertIsNone(cell.qualifier)
- self.assertEqual(cell.timestamp_micros, 0)
- self.assertEqual(cell.labels, [])
-
- def test__copy_from_previous_blank(self):
- ROW_KEY = "RK"
- FAMILY_NAME = u"A"
- QUALIFIER = b"C"
- TIMESTAMP_MICROS = 100
- LABELS = ["L1", "L2"]
- client = _Client()
- client._data_stub = mock.MagicMock()
- request = object()
- yrd = self._make_one(client._data_stub.ReadRows, request)
- cell = _PartialCellData(
- row_key=ROW_KEY,
- family_name=FAMILY_NAME,
- qualifier=QUALIFIER,
- timestamp_micros=TIMESTAMP_MICROS,
- labels=LABELS,
- )
- yrd._previous_cell = _PartialCellData()
- yrd._copy_from_previous(cell)
- self.assertEqual(cell.row_key, ROW_KEY)
- self.assertEqual(cell.family_name, FAMILY_NAME)
- self.assertEqual(cell.qualifier, QUALIFIER)
- self.assertEqual(cell.timestamp_micros, TIMESTAMP_MICROS)
- self.assertEqual(cell.labels, LABELS)
-
- def test__copy_from_previous_filled(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
-
- ROW_KEY = "RK"
- FAMILY_NAME = u"A"
- QUALIFIER = b"C"
- TIMESTAMP_MICROS = 100
- LABELS = ["L1", "L2"]
- client = _Client()
- data_api = mock.create_autospec(BigtableClient)
- client._data_stub = data_api
- request = object()
- yrd = self._make_one(client._data_stub.read_rows, request)
- yrd._previous_cell = _PartialCellData(
- row_key=ROW_KEY,
- family_name=FAMILY_NAME,
- qualifier=QUALIFIER,
- timestamp_micros=TIMESTAMP_MICROS,
- labels=LABELS,
- )
- cell = _PartialCellData()
- yrd._copy_from_previous(cell)
- self.assertEqual(cell.row_key, ROW_KEY)
- self.assertEqual(cell.family_name, FAMILY_NAME)
- self.assertEqual(cell.qualifier, QUALIFIER)
- self.assertEqual(cell.timestamp_micros, 0)
- self.assertEqual(cell.labels, [])
-
- def test_valid_last_scanned_row_key_on_start(self):
- client = _Client()
- response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key="2.AFTER")
- iterator = _MockCancellableIterator(response)
- client._data_stub = mock.MagicMock()
- client._data_stub.read_rows.side_effect = [iterator]
- request = object()
- yrd = self._make_one(client._data_stub.read_rows, request)
- yrd.last_scanned_row_key = "1.BEFORE"
- self._consume_all(yrd)
- self.assertEqual(yrd.last_scanned_row_key, "2.AFTER")
-
- def test_invalid_empty_chunk(self):
- from google.cloud.bigtable.row_data import InvalidChunk
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
-
- client = _Client()
- chunks = _generate_cell_chunks([""])
- response = _ReadRowsResponseV2(chunks)
- iterator = _MockCancellableIterator(response)
- client._data_stub = mock.create_autospec(BigtableClient)
- client._data_stub.read_rows.side_effect = [iterator]
- request = object()
- yrd = self._make_one(client._data_stub.read_rows, request)
- with self.assertRaises(InvalidChunk):
- self._consume_all(yrd)
-
- def test_state_cell_in_progress(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
-
- LABELS = ["L1", "L2"]
-
- request = object()
- client = _Client()
- client._data_stub = mock.create_autospec(BigtableClient)
- yrd = self._make_one(client._data_stub.read_rows, request)
-
- chunk = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- labels=LABELS,
- )
- yrd._update_cell(chunk)
-
- more_cell_data = _ReadRowsResponseCellChunkPB(value=self.VALUE)
- yrd._update_cell(more_cell_data)
-
- self.assertEqual(yrd._cell.row_key, self.ROW_KEY)
- self.assertEqual(yrd._cell.family_name, self.FAMILY_NAME)
- self.assertEqual(yrd._cell.qualifier, self.QUALIFIER)
- self.assertEqual(yrd._cell.timestamp_micros, self.TIMESTAMP_MICROS)
- self.assertEqual(yrd._cell.labels, LABELS)
- self.assertEqual(yrd._cell.value, self.VALUE + self.VALUE)
-
- def test_yield_rows_data(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
-
- client = _Client()
-
- chunk = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
- chunks = [chunk]
+ result = _partial_rows_data_consume_all(yrd)[0]
- response = _ReadRowsResponseV2(chunks)
- iterator = _MockCancellableIterator(response)
- data_api = mock.create_autospec(BigtableClient)
- client._data_stub = data_api
- client._data_stub.read_rows.side_effect = [iterator]
+ assert result == ROW_KEY
- request = object()
- yrd = self._make_one(client._data_stub.read_rows, request)
+def _make_read_rows_request_manager(*args, **kwargs):
+ from google.cloud.bigtable.row_data import _ReadRowsRequestManager
- result = self._consume_all(yrd)[0]
+ return _ReadRowsRequestManager(*args, **kwargs)
- self.assertEqual(result, self.ROW_KEY)
- def test_yield_retry_rows_data(self):
- from google.api_core import retry
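+# Session-scoped pytest replacement for the old setUpClass: builds the
+# shared three-range ReadRows request the _ReadRowsRequestManager tests use.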
+@pytest.fixture(scope="session")
+def rrrm_data():
+ from google.cloud.bigtable import row_set
- client = _Client()
+ row_range1 = row_set.RowRange(b"row_key21", b"row_key29")
+ row_range2 = row_set.RowRange(b"row_key31", b"row_key39")
+ row_range3 = row_set.RowRange(b"row_key41", b"row_key49")
- retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception)
+ request = _ReadRowsRequestPB(table_name=TABLE_NAME)
+ request.rows.row_ranges.append(row_range1.get_range_kwargs())
+ request.rows.row_ranges.append(row_range2.get_range_kwargs())
+ request.rows.row_ranges.append(row_range3.get_range_kwargs())
- chunk = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
- chunks = [chunk]
+ yield {
+ "row_range1": row_range1,
+ "row_range2": row_range2,
+ "row_range3": row_range3,
+ "request": request,
+ }
- response = _ReadRowsResponseV2(chunks)
- failure_iterator = _MockFailureIterator_1()
- iterator = _MockCancellableIterator(response)
- client._data_stub = mock.MagicMock()
- client._data_stub.ReadRows.side_effect = [failure_iterator, iterator]
- request = object()
+def test_RRRM_constructor():
+ request = mock.Mock()
+ last_scanned_key = "last_key"
+ rows_read_so_far = 10
- yrd = self._make_one(client._data_stub.ReadRows, request, retry_read_rows)
+ request_manager = _make_read_rows_request_manager(
+ request, last_scanned_key, rows_read_so_far
+ )
+ assert request == request_manager.message
+ assert last_scanned_key == request_manager.last_scanned_key
+ assert rows_read_so_far == request_manager.rows_read_so_far
- result = self._consume_all(yrd)[0]
- self.assertEqual(result, self.ROW_KEY)
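+# Keys at or before last_scanned_key are dropped; only the unread row keys
+# should survive _filter_rows_keys().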
+def test_RRRM__filter_row_key():
+ table_name = "table_name"
+ request = _ReadRowsRequestPB(table_name=table_name)
+ request.rows.row_keys.extend([b"row_key1", b"row_key2", b"row_key3", b"row_key4"])
- def _consume_all(self, yrd):
- return [row.row_key for row in yrd]
+ last_scanned_key = b"row_key2"
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2)
+ row_keys = request_manager._filter_rows_keys()
+ expected_row_keys = [b"row_key3", b"row_key4"]
+ assert expected_row_keys == row_keys
-class Test_ReadRowsRequestManager(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- cls.table_name = "table_name"
- cls.row_range1 = RowRange(b"row_key21", b"row_key29")
- cls.row_range2 = RowRange(b"row_key31", b"row_key39")
- cls.row_range3 = RowRange(b"row_key41", b"row_key49")
- cls.request = _ReadRowsRequestPB(table_name=cls.table_name)
- cls.request.rows.row_ranges.append(cls.row_range1.get_range_kwargs())
- cls.request.rows.row_ranges.append(cls.row_range2.get_range_kwargs())
- cls.request.rows.row_ranges.append(cls.row_range3.get_range_kwargs())
+def test_RRRM__filter_row_ranges_all_ranges_added_back(rrrm_data):
+ from google.cloud.bigtable_v2.types import data as data_v2_pb2
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_data import _ReadRowsRequestManager
+ request = rrrm_data["request"]
+ last_scanned_key = b"row_key14"
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2)
+ row_ranges = request_manager._filter_row_ranges()
- return _ReadRowsRequestManager
+ exp_row_range1 = data_v2_pb2.RowRange(
+ start_key_closed=b"row_key21", end_key_open=b"row_key29"
+ )
+ exp_row_range2 = data_v2_pb2.RowRange(
+ start_key_closed=b"row_key31", end_key_open=b"row_key39"
+ )
+ exp_row_range3 = data_v2_pb2.RowRange(
+ start_key_closed=b"row_key41", end_key_open=b"row_key49"
+ )
+ exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3]
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+ assert exp_row_ranges == row_ranges
- def test_constructor(self):
- request = mock.Mock()
- last_scanned_key = "last_key"
- rows_read_so_far = 10
- request_manager = self._make_one(request, last_scanned_key, rows_read_so_far)
- self.assertEqual(request, request_manager.message)
- self.assertEqual(last_scanned_key, request_manager.last_scanned_key)
- self.assertEqual(rows_read_so_far, request_manager.rows_read_so_far)
+def test_RRRM__filter_row_ranges_all_ranges_already_read(rrrm_data):
+ request = rrrm_data["request"]
+ last_scanned_key = b"row_key54"
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2)
+ row_ranges = request_manager._filter_row_ranges()
- def test__filter_row_key(self):
- table_name = "table_name"
- request = _ReadRowsRequestPB(table_name=table_name)
- request.rows.row_keys.extend(
- [b"row_key1", b"row_key2", b"row_key3", b"row_key4"]
- )
+ assert row_ranges == []
- last_scanned_key = b"row_key2"
- request_manager = self._make_one(request, last_scanned_key, 2)
- row_keys = request_manager._filter_rows_keys()
- expected_row_keys = [b"row_key3", b"row_key4"]
- self.assertEqual(expected_row_keys, row_keys)
+def test_RRRM__filter_row_ranges_all_ranges_already_read_open_closed():
+ from google.cloud.bigtable import row_set
- def test__filter_row_ranges_all_ranges_added_back(self):
- last_scanned_key = b"row_key14"
- request_manager = self._make_one(self.request, last_scanned_key, 2)
- row_ranges = request_manager._filter_row_ranges()
+ last_scanned_key = b"row_key54"
- exp_row_range1 = data_v2_pb2.RowRange(
- start_key_closed=b"row_key21", end_key_open=b"row_key29"
- )
- exp_row_range2 = data_v2_pb2.RowRange(
- start_key_closed=b"row_key31", end_key_open=b"row_key39"
- )
- exp_row_range3 = data_v2_pb2.RowRange(
- start_key_closed=b"row_key41", end_key_open=b"row_key49"
- )
- exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3]
+ row_range1 = row_set.RowRange(b"row_key21", b"row_key29", False, True)
+ row_range2 = row_set.RowRange(b"row_key31", b"row_key39")
+ row_range3 = row_set.RowRange(b"row_key41", b"row_key49", False, True)
- self.assertEqual(exp_row_ranges, row_ranges)
+ request = _ReadRowsRequestPB(table_name=TABLE_NAME)
+ request.rows.row_ranges.append(row_range1.get_range_kwargs())
+ request.rows.row_ranges.append(row_range2.get_range_kwargs())
+ request.rows.row_ranges.append(row_range3.get_range_kwargs())
- def test__filter_row_ranges_all_ranges_already_read(self):
- last_scanned_key = b"row_key54"
- request_manager = self._make_one(self.request, last_scanned_key, 2)
- row_ranges = request_manager._filter_row_ranges()
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2)
+ request_manager.new_message = _ReadRowsRequestPB(table_name=TABLE_NAME)
+ row_ranges = request_manager._filter_row_ranges()
- self.assertEqual(row_ranges, [])
+ assert row_ranges == []
- def test__filter_row_ranges_all_ranges_already_read_open_closed(self):
- last_scanned_key = b"row_key54"
- row_range1 = RowRange(b"row_key21", b"row_key29", False, True)
- row_range2 = RowRange(b"row_key31", b"row_key39")
- row_range3 = RowRange(b"row_key41", b"row_key49", False, True)
+def test_RRRM__filter_row_ranges_some_ranges_already_read(rrrm_data):
+ from google.cloud.bigtable_v2.types import data as data_v2_pb2
- request = _ReadRowsRequestPB(table_name=self.table_name)
- request.rows.row_ranges.append(row_range1.get_range_kwargs())
- request.rows.row_ranges.append(row_range2.get_range_kwargs())
- request.rows.row_ranges.append(row_range3.get_range_kwargs())
+ request = rrrm_data["request"]
+ last_scanned_key = b"row_key22"
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2)
+ request_manager.new_message = _ReadRowsRequestPB(table_name=TABLE_NAME)
+ row_ranges = request_manager._filter_row_ranges()
- request_manager = self._make_one(request, last_scanned_key, 2)
- request_manager.new_message = _ReadRowsRequestPB(table_name=self.table_name)
- row_ranges = request_manager._filter_row_ranges()
+ exp_row_range1 = data_v2_pb2.RowRange(
+ start_key_open=b"row_key22", end_key_open=b"row_key29"
+ )
+ exp_row_range2 = data_v2_pb2.RowRange(
+ start_key_closed=b"row_key31", end_key_open=b"row_key39"
+ )
+ exp_row_range3 = data_v2_pb2.RowRange(
+ start_key_closed=b"row_key41", end_key_open=b"row_key49"
+ )
+ exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3]
- self.assertEqual(row_ranges, [])
+ assert exp_row_ranges == row_ranges
- def test__filter_row_ranges_some_ranges_already_read(self):
- last_scanned_key = b"row_key22"
- request_manager = self._make_one(self.request, last_scanned_key, 2)
- request_manager.new_message = _ReadRowsRequestPB(table_name=self.table_name)
- row_ranges = request_manager._filter_row_ranges()
- exp_row_range1 = data_v2_pb2.RowRange(
- start_key_open=b"row_key22", end_key_open=b"row_key29"
- )
- exp_row_range2 = data_v2_pb2.RowRange(
- start_key_closed=b"row_key31", end_key_open=b"row_key39"
- )
- exp_row_range3 = data_v2_pb2.RowRange(
- start_key_closed=b"row_key41", end_key_open=b"row_key49"
- )
- exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3]
+def test_RRRM_build_updated_request(rrrm_data):
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ from google.cloud.bigtable_v2 import types
- self.assertEqual(exp_row_ranges, row_ranges)
+ row_range1 = rrrm_data["row_range1"]
+ row_filter = RowSampleFilter(0.33)
+ last_scanned_key = b"row_key25"
+ request = _ReadRowsRequestPB(
+ filter=row_filter.to_pb(), rows_limit=8, table_name=TABLE_NAME
+ )
+ request.rows.row_ranges.append(row_range1.get_range_kwargs())
- def test_build_updated_request(self):
- from google.cloud.bigtable.row_filters import RowSampleFilter
- from google.cloud.bigtable_v2.types import RowRange
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2)
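+ # Two rows were already read, so the rebuilt request drops rows_limit from 8
+ # to 6 and reopens the range just past last_scanned_key.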
- row_filter = RowSampleFilter(0.33)
- last_scanned_key = b"row_key25"
- request = _ReadRowsRequestPB(
- filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name
- )
- request.rows.row_ranges.append(self.row_range1.get_range_kwargs())
+ result = request_manager.build_updated_request()
- request_manager = self._make_one(request, last_scanned_key, 2)
+ expected_result = _ReadRowsRequestPB(
+ table_name=TABLE_NAME, filter=row_filter.to_pb(), rows_limit=6
+ )
- result = request_manager.build_updated_request()
+ row_range1 = types.RowRange(
+ start_key_open=last_scanned_key, end_key_open=row_range1.end_key
+ )
+ expected_result.rows.row_ranges.append(row_range1)
- expected_result = _ReadRowsRequestPB(
- table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6
- )
+ assert expected_result == result
- row_range1 = RowRange(
- start_key_open=last_scanned_key, end_key_open=self.row_range1.end_key
- )
- expected_result.rows.row_ranges.append(row_range1)
- self.assertEqual(expected_result, result)
+def test_RRRM_build_updated_request_full_table():
+ from google.cloud.bigtable_v2 import types
- def test_build_updated_request_full_table(self):
- from google.cloud.bigtable_v2.types import RowRange
+ last_scanned_key = b"row_key14"
- last_scanned_key = b"row_key14"
+ request = _ReadRowsRequestPB(table_name=TABLE_NAME)
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2)
- request = _ReadRowsRequestPB(table_name=self.table_name)
- request_manager = self._make_one(request, last_scanned_key, 2)
+ result = request_manager.build_updated_request()
+ expected_result = _ReadRowsRequestPB(table_name=TABLE_NAME, filter={})
+ row_range1 = types.RowRange(start_key_open=last_scanned_key)
+ expected_result.rows.row_ranges.append(row_range1)
+ assert expected_result == result
- result = request_manager.build_updated_request()
- expected_result = _ReadRowsRequestPB(table_name=self.table_name, filter={})
- row_range1 = RowRange(start_key_open=last_scanned_key)
- expected_result.rows.row_ranges.append(row_range1)
- self.assertEqual(expected_result, result)
- def test_build_updated_request_no_start_key(self):
- from google.cloud.bigtable.row_filters import RowSampleFilter
- from google.cloud.bigtable_v2.types import RowRange
+def test_RRRM_build_updated_request_no_start_key():
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ from google.cloud.bigtable_v2 import types
- row_filter = RowSampleFilter(0.33)
- last_scanned_key = b"row_key25"
- request = _ReadRowsRequestPB(
- filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name
- )
- row_range1 = RowRange(end_key_open=b"row_key29")
- request.rows.row_ranges.append(row_range1)
+ row_filter = RowSampleFilter(0.33)
+ last_scanned_key = b"row_key25"
+ request = _ReadRowsRequestPB(
+ filter=row_filter.to_pb(), rows_limit=8, table_name=TABLE_NAME
+ )
+ row_range1 = types.RowRange(end_key_open=b"row_key29")
+ request.rows.row_ranges.append(row_range1)
- request_manager = self._make_one(request, last_scanned_key, 2)
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2)
- result = request_manager.build_updated_request()
+ result = request_manager.build_updated_request()
- expected_result = _ReadRowsRequestPB(
- table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6
- )
+ expected_result = _ReadRowsRequestPB(
+ table_name=TABLE_NAME, filter=row_filter.to_pb(), rows_limit=6
+ )
- row_range2 = RowRange(
- start_key_open=last_scanned_key, end_key_open=b"row_key29"
- )
- expected_result.rows.row_ranges.append(row_range2)
+ row_range2 = types.RowRange(
+ start_key_open=last_scanned_key, end_key_open=b"row_key29"
+ )
+ expected_result.rows.row_ranges.append(row_range2)
- self.assertEqual(expected_result, result)
+ assert expected_result == result
- def test_build_updated_request_no_end_key(self):
- from google.cloud.bigtable.row_filters import RowSampleFilter
- from google.cloud.bigtable_v2.types import RowRange
- row_filter = RowSampleFilter(0.33)
- last_scanned_key = b"row_key25"
- request = _ReadRowsRequestPB(
- filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name
- )
+def test_RRRM_build_updated_request_no_end_key():
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ from google.cloud.bigtable_v2 import types
- row_range1 = RowRange(start_key_closed=b"row_key20")
- request.rows.row_ranges.append(row_range1)
+ row_filter = RowSampleFilter(0.33)
+ last_scanned_key = b"row_key25"
+ request = _ReadRowsRequestPB(
+ filter=row_filter.to_pb(), rows_limit=8, table_name=TABLE_NAME
+ )
- request_manager = self._make_one(request, last_scanned_key, 2)
+ row_range1 = types.RowRange(start_key_closed=b"row_key20")
+ request.rows.row_ranges.append(row_range1)
- result = request_manager.build_updated_request()
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2)
- expected_result = _ReadRowsRequestPB(
- table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6
- )
- row_range2 = RowRange(start_key_open=last_scanned_key)
- expected_result.rows.row_ranges.append(row_range2)
+ result = request_manager.build_updated_request()
- self.assertEqual(expected_result, result)
+ expected_result = _ReadRowsRequestPB(
+ table_name=TABLE_NAME, filter=row_filter.to_pb(), rows_limit=6
+ )
+ row_range2 = types.RowRange(start_key_open=last_scanned_key)
+ expected_result.rows.row_ranges.append(row_range2)
- def test_build_updated_request_rows(self):
- from google.cloud.bigtable.row_filters import RowSampleFilter
+ assert expected_result == result
- row_filter = RowSampleFilter(0.33)
- last_scanned_key = b"row_key4"
- request = _ReadRowsRequestPB(
- filter=row_filter.to_pb(), rows_limit=5, table_name=self.table_name
- )
- request.rows.row_keys.extend(
- [
- b"row_key1",
- b"row_key2",
- b"row_key4",
- b"row_key5",
- b"row_key7",
- b"row_key9",
- ]
- )
- request_manager = self._make_one(request, last_scanned_key, 3)
+def test_RRRM_build_updated_request_rows():
+ from google.cloud.bigtable.row_filters import RowSampleFilter
- result = request_manager.build_updated_request()
+ row_filter = RowSampleFilter(0.33)
+ last_scanned_key = b"row_key4"
+ request = _ReadRowsRequestPB(
+ filter=row_filter.to_pb(), rows_limit=5, table_name=TABLE_NAME
+ )
+ request.rows.row_keys.extend(
+ [b"row_key1", b"row_key2", b"row_key4", b"row_key5", b"row_key7", b"row_key9"]
+ )
- expected_result = _ReadRowsRequestPB(
- table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=2
- )
- expected_result.rows.row_keys.extend([b"row_key5", b"row_key7", b"row_key9"])
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 3)
- self.assertEqual(expected_result, result)
+ result = request_manager.build_updated_request()
- def test_build_updated_request_rows_limit(self):
- from google.cloud.bigtable_v2.types import RowRange
+ expected_result = _ReadRowsRequestPB(
+ table_name=TABLE_NAME, filter=row_filter.to_pb(), rows_limit=2
+ )
+ expected_result.rows.row_keys.extend([b"row_key5", b"row_key7", b"row_key9"])
- last_scanned_key = b"row_key14"
+ assert expected_result == result
- request = _ReadRowsRequestPB(table_name=self.table_name, rows_limit=10)
- request_manager = self._make_one(request, last_scanned_key, 2)
- result = request_manager.build_updated_request()
- expected_result = _ReadRowsRequestPB(
- table_name=self.table_name, filter={}, rows_limit=8
- )
- row_range1 = RowRange(start_key_open=last_scanned_key)
- expected_result.rows.row_ranges.append(row_range1)
- self.assertEqual(expected_result, result)
+def test_RRRM_build_updated_request_rows_limit():
+ from google.cloud.bigtable_v2 import types
- def test__key_already_read(self):
- last_scanned_key = b"row_key14"
- request = _ReadRowsRequestPB(table_name=self.table_name)
- request_manager = self._make_one(request, last_scanned_key, 2)
+ last_scanned_key = b"row_key14"
- self.assertTrue(request_manager._key_already_read(b"row_key11"))
- self.assertFalse(request_manager._key_already_read(b"row_key16"))
+ request = _ReadRowsRequestPB(table_name=TABLE_NAME, rows_limit=10)
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2)
+ result = request_manager.build_updated_request()
+ expected_result = _ReadRowsRequestPB(table_name=TABLE_NAME, filter={}, rows_limit=8)
+ row_range1 = types.RowRange(start_key_open=last_scanned_key)
+ expected_result.rows.row_ranges.append(row_range1)
+ assert expected_result == result
-class TestPartialRowsData_JSON_acceptance_tests(unittest.TestCase):
- _json_tests = None
+def test_RRRM__key_already_read():
+ last_scanned_key = b"row_key14"
+ request = _ReadRowsRequestPB(table_name=TABLE_NAME)
+ request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2)
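+ # b"row_key11" sorts before last_scanned_key (b"row_key14") and so counts as
+ # already read; b"row_key16" sorts after it and does not.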
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_data import PartialRowsData
+ assert request_manager._key_already_read(b"row_key11")
+ assert not request_manager._key_already_read(b"row_key16")
- return PartialRowsData
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+@pytest.fixture(scope="session")
+def json_tests():
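+ # Load the shared read-rows acceptance test cases once per session, keyed by name.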
+ dirname = os.path.dirname(__file__)
+ filename = os.path.join(dirname, "read-rows-acceptance-test.json")
+ raw = _parse_readrows_acceptance_tests(filename)
+ tests = {}
+ for (name, chunks, results) in raw:
+ tests[name] = chunks, results
- def _load_json_test(self, test_name):
- import os
+ yield tests
- if self.__class__._json_tests is None:
- dirname = os.path.dirname(__file__)
- filename = os.path.join(dirname, "read-rows-acceptance-test.json")
- raw = _parse_readrows_acceptance_tests(filename)
- tests = self.__class__._json_tests = {}
- for (name, chunks, results) in raw:
- tests[name] = chunks, results
- return self.__class__._json_tests[test_name]
- # JSON Error cases: invalid chunks
+# JSON Error cases: invalid chunks
- def _fail_during_consume(self, testcase_name):
- from google.cloud.bigtable.row_data import InvalidChunk
- client = _Client()
- chunks, results = self._load_json_test(testcase_name)
- response = _ReadRowsResponseV2(chunks)
- iterator = _MockCancellableIterator(response)
- client._data_stub = mock.MagicMock()
- client._data_stub.ReadRows.side_effect = [iterator]
- request = object()
- prd = self._make_one(client._data_stub.ReadRows, request)
- with self.assertRaises(InvalidChunk):
- prd.consume_all()
- expected_result = self._sort_flattend_cells(
- [result for result in results if not result["error"]]
- )
- flattened = self._sort_flattend_cells(_flatten_cells(prd))
- self.assertEqual(flattened, expected_result)
+def _fail_during_consume(json_tests, testcase_name):
+ from google.cloud.bigtable.row_data import InvalidChunk
- def test_invalid_no_cell_key_before_commit(self):
- self._fail_during_consume("invalid - no cell key before commit")
+ client = _Client()
+ chunks, results = json_tests[testcase_name]
+ response = _ReadRowsResponseV2(chunks)
+ iterator = _MockCancellableIterator(response)
+ client._data_stub = mock.MagicMock()
+ client._data_stub.ReadRows.side_effect = [iterator]
+ request = object()
+ prd = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ with pytest.raises(InvalidChunk):
+ prd.consume_all()
+ expected_result = _sort_flattend_cells(
+ [result for result in results if not result["error"]]
+ )
+ flattened = _sort_flattend_cells(_flatten_cells(prd))
+ assert flattened == expected_result
- def test_invalid_no_cell_key_before_value(self):
- self._fail_during_consume("invalid - no cell key before value")
- def test_invalid_new_col_family_wo_qualifier(self):
- self._fail_during_consume("invalid - new col family must specify qualifier")
+def test_prd_json_accept_invalid_no_cell_key_before_commit(json_tests):
+ _fail_during_consume(json_tests, "invalid - no cell key before commit")
- def test_invalid_no_commit_between_rows(self):
- self._fail_during_consume("invalid - no commit between rows")
- def test_invalid_no_commit_after_first_row(self):
- self._fail_during_consume("invalid - no commit after first row")
+def test_prd_json_accept_invalid_no_cell_key_before_value(json_tests):
+ _fail_during_consume(json_tests, "invalid - no cell key before value")
- def test_invalid_duplicate_row_key(self):
- self._fail_during_consume("invalid - duplicate row key")
- def test_invalid_new_row_missing_row_key(self):
- self._fail_during_consume("invalid - new row missing row key")
+def test_prd_json_accept_invalid_new_col_family_wo_qualifier(json_tests):
+ _fail_during_consume(json_tests, "invalid - new col family must specify qualifier")
- def test_invalid_bare_reset(self):
- self._fail_during_consume("invalid - bare reset")
- def test_invalid_bad_reset_no_commit(self):
- self._fail_during_consume("invalid - bad reset, no commit")
+def test_prd_json_accept_invalid_no_commit_between_rows(json_tests):
+ _fail_during_consume(json_tests, "invalid - no commit between rows")
- def test_invalid_missing_key_after_reset(self):
- self._fail_during_consume("invalid - missing key after reset")
- def test_invalid_reset_with_chunk(self):
- self._fail_during_consume("invalid - reset with chunk")
+def test_prd_json_accept_invalid_no_commit_after_first_row(json_tests):
+ _fail_during_consume(json_tests, "invalid - no commit after first row")
- def test_invalid_commit_with_chunk(self):
- self._fail_during_consume("invalid - commit with chunk")
- # JSON Error cases: incomplete final row
+def test_prd_json_accept_invalid_duplicate_row_key(json_tests):
+ _fail_during_consume(json_tests, "invalid - duplicate row key")
- def _sort_flattend_cells(self, flattened):
- import operator
- key_func = operator.itemgetter("rk", "fm", "qual")
- return sorted(flattened, key=key_func)
+def test_prd_json_accept_invalid_new_row_missing_row_key(json_tests):
+ _fail_during_consume(json_tests, "invalid - new row missing row key")
- def _incomplete_final_row(self, testcase_name):
- client = _Client()
- chunks, results = self._load_json_test(testcase_name)
- response = _ReadRowsResponseV2(chunks)
- iterator = _MockCancellableIterator(response)
- client._data_stub = mock.MagicMock()
- client._data_stub.ReadRows.side_effect = [iterator]
- request = object()
- prd = self._make_one(client._data_stub.ReadRows, request)
- with self.assertRaises(ValueError):
- prd.consume_all()
- self.assertEqual(prd.state, prd.ROW_IN_PROGRESS)
- expected_result = self._sort_flattend_cells(
- [result for result in results if not result["error"]]
- )
- flattened = self._sort_flattend_cells(_flatten_cells(prd))
- self.assertEqual(flattened, expected_result)
- def test_invalid_no_commit(self):
- self._incomplete_final_row("invalid - no commit")
+def test_prd_json_accept_invalid_bare_reset(json_tests):
+ _fail_during_consume(json_tests, "invalid - bare reset")
- def test_invalid_last_row_missing_commit(self):
- self._incomplete_final_row("invalid - last row missing commit")
- # Non-error cases
+def test_prd_json_accept_invalid_bad_reset_no_commit(json_tests):
+ _fail_during_consume(json_tests, "invalid - bad reset, no commit")
- _marker = object()
- def _match_results(self, testcase_name, expected_result=_marker):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+def test_prd_json_accept_invalid_missing_key_after_reset(json_tests):
+ _fail_during_consume(json_tests, "invalid - missing key after reset")
- client = _Client()
- chunks, results = self._load_json_test(testcase_name)
- response = _ReadRowsResponseV2(chunks)
- iterator = _MockCancellableIterator(response)
- data_api = mock.create_autospec(BigtableClient)
- client._table_data_client = data_api
- client._table_data_client.read_rows.side_effect = [iterator]
- request = object()
- prd = self._make_one(client._table_data_client.read_rows, request)
+
+def test_prd_json_accept_invalid_reset_with_chunk(json_tests):
+ _fail_during_consume(json_tests, "invalid - reset with chunk")
+
+
+def test_prd_json_accept_invalid_commit_with_chunk(json_tests):
+ _fail_during_consume(json_tests, "invalid - commit with chunk")
+
+
+# JSON Error cases: incomplete final row
+
+
+def _sort_flattend_cells(flattened):
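+ # Order flattened cells by (row key, family, qualifier) for deterministic comparison.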
+ import operator
+
+ key_func = operator.itemgetter("rk", "fm", "qual")
+ return sorted(flattened, key=key_func)
+
+
+def _incomplete_final_row(json_tests, testcase_name):
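+ # Expect a ValueError mid-stream, leaving the reader in ROW_IN_PROGRESS while
+ # the rows committed before the failure are still flattened correctly.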
+ client = _Client()
+ chunks, results = json_tests[testcase_name]
+ response = _ReadRowsResponseV2(chunks)
+ iterator = _MockCancellableIterator(response)
+ client._data_stub = mock.MagicMock()
+ client._data_stub.ReadRows.side_effect = [iterator]
+ request = object()
+ prd = _make_partial_rows_data(client._data_stub.ReadRows, request)
+ with pytest.raises(ValueError):
prd.consume_all()
- flattened = self._sort_flattend_cells(_flatten_cells(prd))
- if expected_result is self._marker:
- expected_result = self._sort_flattend_cells(results)
- self.assertEqual(flattened, expected_result)
+ assert prd.state == prd.ROW_IN_PROGRESS
+ expected_result = _sort_flattend_cells(
+ [result for result in results if not result["error"]]
+ )
+ flattened = _sort_flattend_cells(_flatten_cells(prd))
+ assert flattened == expected_result
+
+
+def test_prd_json_accept_invalid_no_commit(json_tests):
+ _incomplete_final_row(json_tests, "invalid - no commit")
- def test_bare_commit_implies_ts_zero(self):
- self._match_results("bare commit implies ts=0")
- def test_simple_row_with_timestamp(self):
- self._match_results("simple row with timestamp")
+def test_prd_json_accept_invalid_last_row_missing_commit(json_tests):
+ _incomplete_final_row(json_tests, "invalid - last row missing commit")
- def test_missing_timestamp_implies_ts_zero(self):
- self._match_results("missing timestamp, implied ts=0")
- def test_empty_cell_value(self):
- self._match_results("empty cell value")
+# Non-error cases
- def test_two_unsplit_cells(self):
- self._match_results("two unsplit cells")
+_marker = object()
- def test_two_qualifiers(self):
- self._match_results("two qualifiers")
- def test_two_families(self):
- self._match_results("two families")
+def _match_results(json_tests, testcase_name, expected_result=_marker):
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- def test_with_labels(self):
- self._match_results("with labels")
+ client = _Client()
+ chunks, results = json_tests[testcase_name]
+ response = _ReadRowsResponseV2(chunks)
+ iterator = _MockCancellableIterator(response)
+ data_api = mock.create_autospec(BigtableClient)
+ client._table_data_client = data_api
+ client._table_data_client.read_rows.side_effect = [iterator]
+ request = object()
+ prd = _make_partial_rows_data(client._table_data_client.read_rows, request)
+ prd.consume_all()
+ flattened = _sort_flattend_cells(_flatten_cells(prd))
+ if expected_result is _marker:
+ expected_result = _sort_flattend_cells(results)
+ assert flattened == expected_result
- def test_split_cell_bare_commit(self):
- self._match_results("split cell, bare commit")
- def test_split_cell(self):
- self._match_results("split cell")
+def test_prd_json_accept_bare_commit_implies_ts_zero(json_tests):
+ _match_results(json_tests, "bare commit implies ts=0")
- def test_split_four_ways(self):
- self._match_results("split four ways")
- def test_two_split_cells(self):
- self._match_results("two split cells")
+def test_prd_json_accept_simple_row_with_timestamp(json_tests):
+ _match_results(json_tests, "simple row with timestamp")
- def test_multi_qualifier_splits(self):
- self._match_results("multi-qualifier splits")
- def test_multi_qualifier_multi_split(self):
- self._match_results("multi-qualifier multi-split")
+def test_prd_json_accept_missing_timestamp_implies_ts_zero(json_tests):
+ _match_results(json_tests, "missing timestamp, implied ts=0")
- def test_multi_family_split(self):
- self._match_results("multi-family split")
- def test_two_rows(self):
- self._match_results("two rows")
+def test_prd_json_accept_empty_cell_value(json_tests):
+ _match_results(json_tests, "empty cell value")
- def test_two_rows_implicit_timestamp(self):
- self._match_results("two rows implicit timestamp")
- def test_two_rows_empty_value(self):
- self._match_results("two rows empty value")
+def test_prd_json_accept_two_unsplit_cells(json_tests):
+ _match_results(json_tests, "two unsplit cells")
- def test_two_rows_one_with_multiple_cells(self):
- self._match_results("two rows, one with multiple cells")
- def test_two_rows_multiple_cells_multiple_families(self):
- self._match_results("two rows, multiple cells, multiple families")
+def test_prd_json_accept_two_qualifiers(json_tests):
+ _match_results(json_tests, "two qualifiers")
- def test_two_rows_multiple_cells(self):
- self._match_results("two rows, multiple cells")
- def test_two_rows_four_cells_two_labels(self):
- self._match_results("two rows, four cells, 2 labels")
+def test_prd_json_accept_two_families(json_tests):
+ _match_results(json_tests, "two families")
- def test_two_rows_with_splits_same_timestamp(self):
- self._match_results("two rows with splits, same timestamp")
- def test_no_data_after_reset(self):
- # JSON testcase has `"results": null`
- self._match_results("no data after reset", expected_result=[])
+def test_prd_json_accept_with_labels(json_tests):
+ _match_results(json_tests, "with labels")
- def test_simple_reset(self):
- self._match_results("simple reset")
- def test_reset_to_new_val(self):
- self._match_results("reset to new val")
+def test_prd_json_accept_split_cell_bare_commit(json_tests):
+ _match_results(json_tests, "split cell, bare commit")
- def test_reset_to_new_qual(self):
- self._match_results("reset to new qual")
- def test_reset_with_splits(self):
- self._match_results("reset with splits")
+def test_prd_json_accept_split_cell(json_tests):
+ _match_results(json_tests, "split cell")
- def test_two_resets(self):
- self._match_results("two resets")
- def test_reset_to_new_row(self):
- self._match_results("reset to new row")
+def test_prd_json_accept_split_four_ways(json_tests):
+ _match_results(json_tests, "split four ways")
- def test_reset_in_between_chunks(self):
- self._match_results("reset in between chunks")
- def test_empty_cell_chunk(self):
- self._match_results("empty cell chunk")
+def test_prd_json_accept_two_split_cells(json_tests):
+ _match_results(json_tests, "two split cells")
- def test_empty_second_qualifier(self):
- self._match_results("empty second qualifier")
+
+def test_prd_json_accept_multi_qualifier_splits(json_tests):
+ _match_results(json_tests, "multi-qualifier splits")
+
+
+def test_prd_json_accept_multi_qualifier_multi_split(json_tests):
+ _match_results(json_tests, "multi-qualifier multi-split")
+
+
+def test_prd_json_accept_multi_family_split(json_tests):
+ _match_results(json_tests, "multi-family split")
+
+
+def test_prd_json_accept_two_rows(json_tests):
+ _match_results(json_tests, "two rows")
+
+
+def test_prd_json_accept_two_rows_implicit_timestamp(json_tests):
+ _match_results(json_tests, "two rows implicit timestamp")
+
+
+def test_prd_json_accept_two_rows_empty_value(json_tests):
+ _match_results(json_tests, "two rows empty value")
+
+
+def test_prd_json_accept_two_rows_one_with_multiple_cells(json_tests):
+ _match_results(json_tests, "two rows, one with multiple cells")
+
+
+def test_prd_json_accept_two_rows_multiple_cells_multiple_families(json_tests):
+ _match_results(json_tests, "two rows, multiple cells, multiple families")
+
+
+def test_prd_json_accept_two_rows_multiple_cells(json_tests):
+ _match_results(json_tests, "two rows, multiple cells")
+
+
+def test_prd_json_accept_two_rows_four_cells_two_labels(json_tests):
+ _match_results(json_tests, "two rows, four cells, 2 labels")
+
+
+def test_prd_json_accept_two_rows_with_splits_same_timestamp(json_tests):
+ _match_results(json_tests, "two rows with splits, same timestamp")
+
+
+def test_prd_json_accept_no_data_after_reset(json_tests):
+ # JSON testcase has `"results": null`
+ _match_results(json_tests, "no data after reset", expected_result=[])
+
+
+def test_prd_json_accept_simple_reset(json_tests):
+ _match_results(json_tests, "simple reset")
+
+
+def test_prd_json_accept_reset_to_new_val(json_tests):
+ _match_results(json_tests, "reset to new val")
+
+
+def test_prd_json_accept_reset_to_new_qual(json_tests):
+ _match_results(json_tests, "reset to new qual")
+
+
+def test_prd_json_accept_reset_with_splits(json_tests):
+ _match_results(json_tests, "reset with splits")
+
+
+def test_prd_json_accept_two_resets(json_tests):
+ _match_results(json_tests, "two resets")
+
+
+def test_prd_json_accept_reset_to_new_row(json_tests):
+ _match_results(json_tests, "reset to new row")
+
+
+def test_prd_json_accept_reset_in_between_chunks(json_tests):
+ _match_results(json_tests, "reset in between chunks")
+
+
+def test_prd_json_accept_empty_cell_chunk(json_tests):
+ _match_results(json_tests, "empty cell chunk")
+
+
+def test_prd_json_accept_empty_second_qualifier(json_tests):
+ _match_results(json_tests, "empty second qualifier")
def _flatten_cells(prd):
@@ -1268,6 +1347,8 @@ def next(self):
class _MockFailureIterator_1(object):
def next(self):
+ from google.api_core.exceptions import DeadlineExceeded
+
raise DeadlineExceeded("Failed to read from server")
__next__ = next
@@ -1340,10 +1421,10 @@ def _ReadRowsResponseCellChunkPB(*args, **kw):
return message
-def _make_cell(value):
+def _make_cell_pb(value):
from google.cloud.bigtable import row_data
- return row_data.Cell(value, TestCell.timestamp_micros)
+ return row_data.Cell(value, TIMESTAMP_MICROS)
def _ReadRowsRequestPB(*args, **kw):
@@ -1353,4 +1434,11 @@ def _ReadRowsRequestPB(*args, **kw):
def _read_rows_retry_exception(exc):
+ from google.api_core.exceptions import DeadlineExceeded
+
return isinstance(exc, DeadlineExceeded)
+
+
+class _Client(object):
+
+ data_stub = None
diff --git a/tests/unit/test_row_filters.py b/tests/unit/test_row_filters.py
index c42345ee0..8c591e03c 100644
--- a/tests/unit/test_row_filters.py
+++ b/tests/unit/test_row_filters.py
@@ -13,1047 +13,1107 @@
# limitations under the License.
-import unittest
+import pytest
-class Test_BoolFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import _BoolFilter
+def test_bool_filter_constructor():
+ from google.cloud.bigtable.row_filters import _BoolFilter
- return _BoolFilter
+ flag = object()
+ row_filter = _BoolFilter(flag)
+ assert row_filter.flag is flag
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
- def test_constructor(self):
- flag = object()
- row_filter = self._make_one(flag)
- self.assertIs(row_filter.flag, flag)
+def test_bool_filter___eq__type_differ():
+ from google.cloud.bigtable.row_filters import _BoolFilter
- def test___eq__type_differ(self):
- flag = object()
- row_filter1 = self._make_one(flag)
- row_filter2 = object()
- self.assertNotEqual(row_filter1, row_filter2)
+ flag = object()
+ row_filter1 = _BoolFilter(flag)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
- def test___eq__same_value(self):
- flag = object()
- row_filter1 = self._make_one(flag)
- row_filter2 = self._make_one(flag)
- self.assertEqual(row_filter1, row_filter2)
- def test___ne__same_value(self):
- flag = object()
- row_filter1 = self._make_one(flag)
- row_filter2 = self._make_one(flag)
- comparison_val = row_filter1 != row_filter2
- self.assertFalse(comparison_val)
+def test_bool_filter___eq__same_value():
+ from google.cloud.bigtable.row_filters import _BoolFilter
+ flag = object()
+ row_filter1 = _BoolFilter(flag)
+ row_filter2 = _BoolFilter(flag)
+ assert row_filter1 == row_filter2
-class TestSinkFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import SinkFilter
- return SinkFilter
+def test_bool_filter___ne__same_value():
+ from google.cloud.bigtable.row_filters import _BoolFilter
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+ flag = object()
+ row_filter1 = _BoolFilter(flag)
+ row_filter2 = _BoolFilter(flag)
+ assert not (row_filter1 != row_filter2)
- def test_to_pb(self):
- flag = True
- row_filter = self._make_one(flag)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(sink=flag)
- self.assertEqual(pb_val, expected_pb)
+def test_sink_filter_to_pb():
+ from google.cloud.bigtable.row_filters import SinkFilter
-class TestPassAllFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import PassAllFilter
+ flag = True
+ row_filter = SinkFilter(flag)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(sink=flag)
+ assert pb_val == expected_pb
- return PassAllFilter
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+def test_pass_all_filter_to_pb():
+ from google.cloud.bigtable.row_filters import PassAllFilter
- def test_to_pb(self):
- flag = True
- row_filter = self._make_one(flag)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(pass_all_filter=flag)
- self.assertEqual(pb_val, expected_pb)
+ flag = True
+ row_filter = PassAllFilter(flag)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(pass_all_filter=flag)
+ assert pb_val == expected_pb
-class TestBlockAllFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import BlockAllFilter
+def test_block_all_filter_to_pb():
+ from google.cloud.bigtable.row_filters import BlockAllFilter
- return BlockAllFilter
+ flag = True
+ row_filter = BlockAllFilter(flag)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(block_all_filter=flag)
+ assert pb_val == expected_pb
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
- def test_to_pb(self):
- flag = True
- row_filter = self._make_one(flag)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(block_all_filter=flag)
- self.assertEqual(pb_val, expected_pb)
+def test_regex_filter_constructor():
+ from google.cloud.bigtable.row_filters import _RegexFilter
+ regex = b"abc"
+ row_filter = _RegexFilter(regex)
+ assert row_filter.regex is regex
-class Test_RegexFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import _RegexFilter
- return _RegexFilter
+def test_regex_filter_constructor_non_bytes():
+ from google.cloud.bigtable.row_filters import _RegexFilter
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+ regex = u"abc"
+ row_filter = _RegexFilter(regex)
+ assert row_filter.regex == b"abc"
- def test_constructor(self):
- regex = b"abc"
- row_filter = self._make_one(regex)
- self.assertIs(row_filter.regex, regex)
- def test_constructor_non_bytes(self):
- regex = u"abc"
- row_filter = self._make_one(regex)
- self.assertEqual(row_filter.regex, b"abc")
+def test_regex_filter___eq__type_differ():
+ from google.cloud.bigtable.row_filters import _RegexFilter
- def test___eq__type_differ(self):
- regex = b"def-rgx"
- row_filter1 = self._make_one(regex)
- row_filter2 = object()
- self.assertNotEqual(row_filter1, row_filter2)
+ regex = b"def-rgx"
+ row_filter1 = _RegexFilter(regex)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
- def test___eq__same_value(self):
- regex = b"trex-regex"
- row_filter1 = self._make_one(regex)
- row_filter2 = self._make_one(regex)
- self.assertEqual(row_filter1, row_filter2)
- def test___ne__same_value(self):
- regex = b"abc"
- row_filter1 = self._make_one(regex)
- row_filter2 = self._make_one(regex)
- comparison_val = row_filter1 != row_filter2
- self.assertFalse(comparison_val)
+def test_regex_filter___eq__same_value():
+ from google.cloud.bigtable.row_filters import _RegexFilter
+ regex = b"trex-regex"
+ row_filter1 = _RegexFilter(regex)
+ row_filter2 = _RegexFilter(regex)
+ assert row_filter1 == row_filter2
-class TestRowKeyRegexFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import RowKeyRegexFilter
- return RowKeyRegexFilter
+def test_regex_filter___ne__same_value():
+ from google.cloud.bigtable.row_filters import _RegexFilter
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+ regex = b"abc"
+ row_filter1 = _RegexFilter(regex)
+ row_filter2 = _RegexFilter(regex)
+ assert not (row_filter1 != row_filter2)
- def test_to_pb(self):
- regex = b"row-key-regex"
- row_filter = self._make_one(regex)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(row_key_regex_filter=regex)
- self.assertEqual(pb_val, expected_pb)
-
-
-class TestRowSampleFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import RowSampleFilter
- return RowSampleFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_constructor(self):
- sample = object()
- row_filter = self._make_one(sample)
- self.assertIs(row_filter.sample, sample)
-
- def test___eq__type_differ(self):
- sample = object()
- row_filter1 = self._make_one(sample)
- row_filter2 = object()
- self.assertNotEqual(row_filter1, row_filter2)
-
- def test___eq__same_value(self):
- sample = object()
- row_filter1 = self._make_one(sample)
- row_filter2 = self._make_one(sample)
- self.assertEqual(row_filter1, row_filter2)
-
- def test_to_pb(self):
- sample = 0.25
- row_filter = self._make_one(sample)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(row_sample_filter=sample)
- self.assertEqual(pb_val, expected_pb)
-
-
-class TestFamilyNameRegexFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import FamilyNameRegexFilter
-
- return FamilyNameRegexFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_to_pb(self):
- regex = u"family-regex"
- row_filter = self._make_one(regex)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(family_name_regex_filter=regex)
- self.assertEqual(pb_val, expected_pb)
-
-
-class TestColumnQualifierRegexFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter
-
- return ColumnQualifierRegexFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_to_pb(self):
- regex = b"column-regex"
- row_filter = self._make_one(regex)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(column_qualifier_regex_filter=regex)
- self.assertEqual(pb_val, expected_pb)
-
-
-class TestTimestampRange(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import TimestampRange
-
- return TimestampRange
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_constructor(self):
- start = object()
- end = object()
- time_range = self._make_one(start=start, end=end)
- self.assertIs(time_range.start, start)
- self.assertIs(time_range.end, end)
-
- def test___eq__(self):
- start = object()
- end = object()
- time_range1 = self._make_one(start=start, end=end)
- time_range2 = self._make_one(start=start, end=end)
- self.assertEqual(time_range1, time_range2)
-
- def test___eq__type_differ(self):
- start = object()
- end = object()
- time_range1 = self._make_one(start=start, end=end)
- time_range2 = object()
- self.assertNotEqual(time_range1, time_range2)
-
- def test___ne__same_value(self):
- start = object()
- end = object()
- time_range1 = self._make_one(start=start, end=end)
- time_range2 = self._make_one(start=start, end=end)
- comparison_val = time_range1 != time_range2
- self.assertFalse(comparison_val)
-
- def _to_pb_helper(self, pb_kwargs, start=None, end=None):
- import datetime
- from google.cloud._helpers import _EPOCH
-
- if start is not None:
- start = _EPOCH + datetime.timedelta(microseconds=start)
- if end is not None:
- end = _EPOCH + datetime.timedelta(microseconds=end)
- time_range = self._make_one(start=start, end=end)
- expected_pb = _TimestampRangePB(**pb_kwargs)
- time_pb = time_range.to_pb()
- self.assertEqual(
- time_pb.start_timestamp_micros, expected_pb.start_timestamp_micros
- )
- self.assertEqual(time_pb.end_timestamp_micros, expected_pb.end_timestamp_micros)
- self.assertEqual(time_pb, expected_pb)
-
- def test_to_pb(self):
- start_micros = 30871234
- end_micros = 12939371234
- start_millis = start_micros // 1000 * 1000
- self.assertEqual(start_millis, 30871000)
- end_millis = end_micros // 1000 * 1000 + 1000
- self.assertEqual(end_millis, 12939372000)
- pb_kwargs = {}
- pb_kwargs["start_timestamp_micros"] = start_millis
- pb_kwargs["end_timestamp_micros"] = end_millis
- self._to_pb_helper(pb_kwargs, start=start_micros, end=end_micros)
-
- def test_to_pb_start_only(self):
- # Makes sure already milliseconds granularity
- start_micros = 30871000
- start_millis = start_micros // 1000 * 1000
- self.assertEqual(start_millis, 30871000)
- pb_kwargs = {}
- pb_kwargs["start_timestamp_micros"] = start_millis
- self._to_pb_helper(pb_kwargs, start=start_micros, end=None)
-
- def test_to_pb_end_only(self):
- # Makes sure already milliseconds granularity
- end_micros = 12939371000
- end_millis = end_micros // 1000 * 1000
- self.assertEqual(end_millis, 12939371000)
- pb_kwargs = {}
- pb_kwargs["end_timestamp_micros"] = end_millis
- self._to_pb_helper(pb_kwargs, start=None, end=end_micros)
-
-
-class TestTimestampRangeFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import TimestampRangeFilter
-
- return TimestampRangeFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_constructor(self):
- range_ = object()
- row_filter = self._make_one(range_)
- self.assertIs(row_filter.range_, range_)
-
- def test___eq__type_differ(self):
- range_ = object()
- row_filter1 = self._make_one(range_)
- row_filter2 = object()
- self.assertNotEqual(row_filter1, row_filter2)
-
- def test___eq__same_value(self):
- range_ = object()
- row_filter1 = self._make_one(range_)
- row_filter2 = self._make_one(range_)
- self.assertEqual(row_filter1, row_filter2)
-
- def test_to_pb(self):
- from google.cloud.bigtable.row_filters import TimestampRange
-
- range_ = TimestampRange()
- row_filter = self._make_one(range_)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(timestamp_range_filter=_TimestampRangePB())
- self.assertEqual(pb_val, expected_pb)
-
-
-class TestColumnRangeFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import ColumnRangeFilter
-
- return ColumnRangeFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_constructor_defaults(self):
- column_family_id = object()
- row_filter = self._make_one(column_family_id)
- self.assertIs(row_filter.column_family_id, column_family_id)
- self.assertIsNone(row_filter.start_column)
- self.assertIsNone(row_filter.end_column)
- self.assertTrue(row_filter.inclusive_start)
- self.assertTrue(row_filter.inclusive_end)
-
- def test_constructor_explicit(self):
- column_family_id = object()
- start_column = object()
- end_column = object()
- inclusive_start = object()
- inclusive_end = object()
- row_filter = self._make_one(
- column_family_id,
- start_column=start_column,
- end_column=end_column,
- inclusive_start=inclusive_start,
- inclusive_end=inclusive_end,
- )
- self.assertIs(row_filter.column_family_id, column_family_id)
- self.assertIs(row_filter.start_column, start_column)
- self.assertIs(row_filter.end_column, end_column)
- self.assertIs(row_filter.inclusive_start, inclusive_start)
- self.assertIs(row_filter.inclusive_end, inclusive_end)
-
- def test_constructor_bad_start(self):
- column_family_id = object()
- self.assertRaises(
- ValueError, self._make_one, column_family_id, inclusive_start=True
- )
+def test_row_key_regex_filter_to_pb():
+ from google.cloud.bigtable.row_filters import RowKeyRegexFilter
- def test_constructor_bad_end(self):
- column_family_id = object()
- self.assertRaises(
- ValueError, self._make_one, column_family_id, inclusive_end=True
- )
+ regex = b"row-key-regex"
+ row_filter = RowKeyRegexFilter(regex)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(row_key_regex_filter=regex)
+ assert pb_val == expected_pb
- def test___eq__(self):
- column_family_id = object()
- start_column = object()
- end_column = object()
- inclusive_start = object()
- inclusive_end = object()
- row_filter1 = self._make_one(
- column_family_id,
- start_column=start_column,
- end_column=end_column,
- inclusive_start=inclusive_start,
- inclusive_end=inclusive_end,
- )
- row_filter2 = self._make_one(
- column_family_id,
- start_column=start_column,
- end_column=end_column,
- inclusive_start=inclusive_start,
- inclusive_end=inclusive_end,
- )
- self.assertEqual(row_filter1, row_filter2)
-
- def test___eq__type_differ(self):
- column_family_id = object()
- row_filter1 = self._make_one(column_family_id)
- row_filter2 = object()
- self.assertNotEqual(row_filter1, row_filter2)
-
- def test_to_pb(self):
- column_family_id = u"column-family-id"
- row_filter = self._make_one(column_family_id)
- col_range_pb = _ColumnRangePB(family_name=column_family_id)
- expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
- self.assertEqual(row_filter.to_pb(), expected_pb)
-
- def test_to_pb_inclusive_start(self):
- column_family_id = u"column-family-id"
- column = b"column"
- row_filter = self._make_one(column_family_id, start_column=column)
- col_range_pb = _ColumnRangePB(
- family_name=column_family_id, start_qualifier_closed=column
- )
- expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
- self.assertEqual(row_filter.to_pb(), expected_pb)
-
- def test_to_pb_exclusive_start(self):
- column_family_id = u"column-family-id"
- column = b"column"
- row_filter = self._make_one(
- column_family_id, start_column=column, inclusive_start=False
- )
- col_range_pb = _ColumnRangePB(
- family_name=column_family_id, start_qualifier_open=column
- )
- expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
- self.assertEqual(row_filter.to_pb(), expected_pb)
-
- def test_to_pb_inclusive_end(self):
- column_family_id = u"column-family-id"
- column = b"column"
- row_filter = self._make_one(column_family_id, end_column=column)
- col_range_pb = _ColumnRangePB(
- family_name=column_family_id, end_qualifier_closed=column
- )
- expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
- self.assertEqual(row_filter.to_pb(), expected_pb)
-
- def test_to_pb_exclusive_end(self):
- column_family_id = u"column-family-id"
- column = b"column"
- row_filter = self._make_one(
- column_family_id, end_column=column, inclusive_end=False
- )
- col_range_pb = _ColumnRangePB(
- family_name=column_family_id, end_qualifier_open=column
- )
- expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
- self.assertEqual(row_filter.to_pb(), expected_pb)
-
-
-class TestValueRegexFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import ValueRegexFilter
-
- return ValueRegexFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_to_pb_w_bytes(self):
- value = regex = b"value-regex"
- row_filter = self._make_one(value)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(value_regex_filter=regex)
- self.assertEqual(pb_val, expected_pb)
-
- def test_to_pb_w_str(self):
- value = u"value-regex"
- regex = value.encode("ascii")
- row_filter = self._make_one(value)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(value_regex_filter=regex)
- self.assertEqual(pb_val, expected_pb)
-
-
-class TestExactValueFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import ExactValueFilter
-
- return ExactValueFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_to_pb_w_bytes(self):
- value = regex = b"value-regex"
- row_filter = self._make_one(value)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(value_regex_filter=regex)
- self.assertEqual(pb_val, expected_pb)
-
- def test_to_pb_w_str(self):
- value = u"value-regex"
- regex = value.encode("ascii")
- row_filter = self._make_one(value)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(value_regex_filter=regex)
- self.assertEqual(pb_val, expected_pb)
-
- def test_to_pb_w_int(self):
- import struct
-
- value = 1
- regex = struct.Struct(">q").pack(value)
- row_filter = self._make_one(value)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(value_regex_filter=regex)
- self.assertEqual(pb_val, expected_pb)
-
-
-class TestValueRangeFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import ValueRangeFilter
-
- return ValueRangeFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_constructor_defaults(self):
- row_filter = self._make_one()
-
- self.assertIsNone(row_filter.start_value)
- self.assertIsNone(row_filter.end_value)
- self.assertTrue(row_filter.inclusive_start)
- self.assertTrue(row_filter.inclusive_end)
-
- def test_constructor_explicit(self):
- start_value = object()
- end_value = object()
- inclusive_start = object()
- inclusive_end = object()
-
- row_filter = self._make_one(
- start_value=start_value,
- end_value=end_value,
- inclusive_start=inclusive_start,
- inclusive_end=inclusive_end,
- )
- self.assertIs(row_filter.start_value, start_value)
- self.assertIs(row_filter.end_value, end_value)
- self.assertIs(row_filter.inclusive_start, inclusive_start)
- self.assertIs(row_filter.inclusive_end, inclusive_end)
-
- def test_constructor_w_int_values(self):
- import struct
-
- start_value = 1
- end_value = 10
-
- row_filter = self._make_one(start_value=start_value, end_value=end_value)
-
- expected_start_value = struct.Struct(">q").pack(start_value)
- expected_end_value = struct.Struct(">q").pack(end_value)
-
- self.assertEqual(row_filter.start_value, expected_start_value)
- self.assertEqual(row_filter.end_value, expected_end_value)
- self.assertTrue(row_filter.inclusive_start)
- self.assertTrue(row_filter.inclusive_end)
-
- def test_constructor_bad_start(self):
- with self.assertRaises(ValueError):
- self._make_one(inclusive_start=True)
-
- def test_constructor_bad_end(self):
- with self.assertRaises(ValueError):
- self._make_one(inclusive_end=True)
-
- def test___eq__(self):
- start_value = object()
- end_value = object()
- inclusive_start = object()
- inclusive_end = object()
- row_filter1 = self._make_one(
- start_value=start_value,
- end_value=end_value,
- inclusive_start=inclusive_start,
- inclusive_end=inclusive_end,
- )
- row_filter2 = self._make_one(
- start_value=start_value,
- end_value=end_value,
- inclusive_start=inclusive_start,
- inclusive_end=inclusive_end,
- )
- self.assertEqual(row_filter1, row_filter2)
-
- def test___eq__type_differ(self):
- row_filter1 = self._make_one()
- row_filter2 = object()
- self.assertNotEqual(row_filter1, row_filter2)
-
- def test_to_pb(self):
- row_filter = self._make_one()
- expected_pb = _RowFilterPB(value_range_filter=_ValueRangePB())
- self.assertEqual(row_filter.to_pb(), expected_pb)
-
- def test_to_pb_inclusive_start(self):
- value = b"some-value"
- row_filter = self._make_one(start_value=value)
- val_range_pb = _ValueRangePB(start_value_closed=value)
- expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
- self.assertEqual(row_filter.to_pb(), expected_pb)
-
- def test_to_pb_exclusive_start(self):
- value = b"some-value"
- row_filter = self._make_one(start_value=value, inclusive_start=False)
- val_range_pb = _ValueRangePB(start_value_open=value)
- expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
- self.assertEqual(row_filter.to_pb(), expected_pb)
-
- def test_to_pb_inclusive_end(self):
- value = b"some-value"
- row_filter = self._make_one(end_value=value)
- val_range_pb = _ValueRangePB(end_value_closed=value)
- expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
- self.assertEqual(row_filter.to_pb(), expected_pb)
-
- def test_to_pb_exclusive_end(self):
- value = b"some-value"
- row_filter = self._make_one(end_value=value, inclusive_end=False)
- val_range_pb = _ValueRangePB(end_value_open=value)
- expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
- self.assertEqual(row_filter.to_pb(), expected_pb)
-
-
-class Test_CellCountFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import _CellCountFilter
-
- return _CellCountFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_constructor(self):
- num_cells = object()
- row_filter = self._make_one(num_cells)
- self.assertIs(row_filter.num_cells, num_cells)
-
- def test___eq__type_differ(self):
- num_cells = object()
- row_filter1 = self._make_one(num_cells)
- row_filter2 = object()
- self.assertNotEqual(row_filter1, row_filter2)
-
- def test___eq__same_value(self):
- num_cells = object()
- row_filter1 = self._make_one(num_cells)
- row_filter2 = self._make_one(num_cells)
- self.assertEqual(row_filter1, row_filter2)
-
- def test___ne__same_value(self):
- num_cells = object()
- row_filter1 = self._make_one(num_cells)
- row_filter2 = self._make_one(num_cells)
- comparison_val = row_filter1 != row_filter2
- self.assertFalse(comparison_val)
-
-
-class TestCellsRowOffsetFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import CellsRowOffsetFilter
-
- return CellsRowOffsetFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_to_pb(self):
- num_cells = 76
- row_filter = self._make_one(num_cells)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(cells_per_row_offset_filter=num_cells)
- self.assertEqual(pb_val, expected_pb)
-
-
-class TestCellsRowLimitFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import CellsRowLimitFilter
-
- return CellsRowLimitFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_to_pb(self):
- num_cells = 189
- row_filter = self._make_one(num_cells)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(cells_per_row_limit_filter=num_cells)
- self.assertEqual(pb_val, expected_pb)
-
-
-class TestCellsColumnLimitFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import CellsColumnLimitFilter
-
- return CellsColumnLimitFilter
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+def test_row_sample_filter_constructor():
+ from google.cloud.bigtable.row_filters import RowSampleFilter
- def test_to_pb(self):
- num_cells = 10
- row_filter = self._make_one(num_cells)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(cells_per_column_limit_filter=num_cells)
- self.assertEqual(pb_val, expected_pb)
+ sample = object()
+ row_filter = RowSampleFilter(sample)
+ assert row_filter.sample is sample
-class TestStripValueTransformerFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import StripValueTransformerFilter
+def test_row_sample_filter___eq__type_differ():
+ from google.cloud.bigtable.row_filters import RowSampleFilter
- return StripValueTransformerFilter
+ sample = object()
+ row_filter1 = RowSampleFilter(sample)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
- def test_to_pb(self):
- flag = True
- row_filter = self._make_one(flag)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(strip_value_transformer=flag)
- self.assertEqual(pb_val, expected_pb)
+def test_row_sample_filter___eq__same_value():
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ sample = object()
+ row_filter1 = RowSampleFilter(sample)
+ row_filter2 = RowSampleFilter(sample)
+ assert row_filter1 == row_filter2
-class TestApplyLabelFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import ApplyLabelFilter
- return ApplyLabelFilter
+def test_row_sample_filter___ne__():
+ from google.cloud.bigtable.row_filters import RowSampleFilter
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+ sample = object()
+ other_sample = object()
+ row_filter1 = RowSampleFilter(sample)
+ row_filter2 = RowSampleFilter(other_sample)
+ assert row_filter1 != row_filter2
- def test_constructor(self):
- label = object()
- row_filter = self._make_one(label)
- self.assertIs(row_filter.label, label)
- def test___eq__type_differ(self):
- label = object()
- row_filter1 = self._make_one(label)
- row_filter2 = object()
- self.assertNotEqual(row_filter1, row_filter2)
+def test_row_sample_filter_to_pb():
+ from google.cloud.bigtable.row_filters import RowSampleFilter
- def test___eq__same_value(self):
- label = object()
- row_filter1 = self._make_one(label)
- row_filter2 = self._make_one(label)
- self.assertEqual(row_filter1, row_filter2)
+ sample = 0.25
+ row_filter = RowSampleFilter(sample)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(row_sample_filter=sample)
+ assert pb_val == expected_pb
- def test_to_pb(self):
- label = u"label"
- row_filter = self._make_one(label)
- pb_val = row_filter.to_pb()
- expected_pb = _RowFilterPB(apply_label_transformer=label)
- self.assertEqual(pb_val, expected_pb)
+def test_family_name_regex_filter_to_pb():
+ from google.cloud.bigtable.row_filters import FamilyNameRegexFilter
-class Test_FilterCombination(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import _FilterCombination
+ regex = u"family-regex"
+ row_filter = FamilyNameRegexFilter(regex)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(family_name_regex_filter=regex)
+ assert pb_val == expected_pb
- return _FilterCombination
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+def test_column_qualifier_regex_filter_to_pb():
+ from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter
- def test_constructor_defaults(self):
- row_filter = self._make_one()
- self.assertEqual(row_filter.filters, [])
+ regex = b"column-regex"
+ row_filter = ColumnQualifierRegexFilter(regex)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(column_qualifier_regex_filter=regex)
+ assert pb_val == expected_pb
- def test_constructor_explicit(self):
- filters = object()
- row_filter = self._make_one(filters=filters)
- self.assertIs(row_filter.filters, filters)
- def test___eq__(self):
- filters = object()
- row_filter1 = self._make_one(filters=filters)
- row_filter2 = self._make_one(filters=filters)
- self.assertEqual(row_filter1, row_filter2)
+def test_timestamp_range_constructor():
+ from google.cloud.bigtable.row_filters import TimestampRange
- def test___eq__type_differ(self):
- filters = object()
- row_filter1 = self._make_one(filters=filters)
- row_filter2 = object()
- self.assertNotEqual(row_filter1, row_filter2)
+ start = object()
+ end = object()
+ time_range = TimestampRange(start=start, end=end)
+ assert time_range.start is start
+ assert time_range.end is end
-class TestRowFilterChain(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import RowFilterChain
+def test_timestamp_range___eq__():
+ from google.cloud.bigtable.row_filters import TimestampRange
- return RowFilterChain
+ start = object()
+ end = object()
+ time_range1 = TimestampRange(start=start, end=end)
+ time_range2 = TimestampRange(start=start, end=end)
+ assert time_range1 == time_range2
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
- def test_to_pb(self):
- from google.cloud.bigtable.row_filters import RowSampleFilter
- from google.cloud.bigtable.row_filters import StripValueTransformerFilter
+def test_timestamp_range___eq__type_differ():
+ from google.cloud.bigtable.row_filters import TimestampRange
- row_filter1 = StripValueTransformerFilter(True)
- row_filter1_pb = row_filter1.to_pb()
+ start = object()
+ end = object()
+ time_range1 = TimestampRange(start=start, end=end)
+ time_range2 = object()
+ assert not (time_range1 == time_range2)
- row_filter2 = RowSampleFilter(0.25)
- row_filter2_pb = row_filter2.to_pb()
- row_filter3 = self._make_one(filters=[row_filter1, row_filter2])
- filter_pb = row_filter3.to_pb()
+def test_timestamp_range___ne__same_value():
+ from google.cloud.bigtable.row_filters import TimestampRange
- expected_pb = _RowFilterPB(
- chain=_RowFilterChainPB(filters=[row_filter1_pb, row_filter2_pb])
- )
- self.assertEqual(filter_pb, expected_pb)
+ start = object()
+ end = object()
+ time_range1 = TimestampRange(start=start, end=end)
+ time_range2 = TimestampRange(start=start, end=end)
+ assert not (time_range1 != time_range2)
- def test_to_pb_nested(self):
- from google.cloud.bigtable.row_filters import CellsRowLimitFilter
- from google.cloud.bigtable.row_filters import RowSampleFilter
- from google.cloud.bigtable.row_filters import StripValueTransformerFilter
- row_filter1 = StripValueTransformerFilter(True)
- row_filter2 = RowSampleFilter(0.25)
+def _timestamp_range_to_pb_helper(pb_kwargs, start=None, end=None):
+ import datetime
+ from google.cloud._helpers import _EPOCH
+ from google.cloud.bigtable.row_filters import TimestampRange
- row_filter3 = self._make_one(filters=[row_filter1, row_filter2])
- row_filter3_pb = row_filter3.to_pb()
+ if start is not None:
+ start = _EPOCH + datetime.timedelta(microseconds=start)
+ if end is not None:
+ end = _EPOCH + datetime.timedelta(microseconds=end)
+ time_range = TimestampRange(start=start, end=end)
+ expected_pb = _TimestampRangePB(**pb_kwargs)
+ time_pb = time_range.to_pb()
+ assert time_pb.start_timestamp_micros == expected_pb.start_timestamp_micros
+ assert time_pb.end_timestamp_micros == expected_pb.end_timestamp_micros
+ assert time_pb == expected_pb
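+
+
+# The helper builds datetime bounds as offsets from
+# google.cloud._helpers._EPOCH (the UTC Unix epoch), so the expected
+# *_timestamp_micros values can be written as plain integers.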
- row_filter4 = CellsRowLimitFilter(11)
- row_filter4_pb = row_filter4.to_pb()
- row_filter5 = self._make_one(filters=[row_filter3, row_filter4])
- filter_pb = row_filter5.to_pb()
+def test_timestamp_range_to_pb():
+ start_micros = 30871234
+ end_micros = 12939371234
+ start_millis = start_micros // 1000 * 1000
+ assert start_millis == 30871000
+ end_millis = end_micros // 1000 * 1000 + 1000
+ assert end_millis == 12939372000
+ pb_kwargs = {}
+ pb_kwargs["start_timestamp_micros"] = start_millis
+ pb_kwargs["end_timestamp_micros"] = end_millis
+ _timestamp_range_to_pb_helper(pb_kwargs, start=start_micros, end=end_micros)
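+
+
+# The expected values above pin down TimestampRange.to_pb()'s rounding to
+# Bigtable's millisecond granularity: the start bound is floored to the
+# previous whole millisecond, while a ragged end bound is ceiled to the
+# next one; already-aligned bounds pass through unchanged (see the
+# start-only and end-only cases below).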
- expected_pb = _RowFilterPB(
- chain=_RowFilterChainPB(filters=[row_filter3_pb, row_filter4_pb])
- )
- self.assertEqual(filter_pb, expected_pb)
+def test_timestamp_range_to_pb_start_only():
+    # The start value is already at millisecond granularity, so no rounding occurs.
+ start_micros = 30871000
+ start_millis = start_micros // 1000 * 1000
+ assert start_millis == 30871000
+ pb_kwargs = {}
+ pb_kwargs["start_timestamp_micros"] = start_millis
+ _timestamp_range_to_pb_helper(pb_kwargs, start=start_micros, end=None)
-class TestRowFilterUnion(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import RowFilterUnion
- return RowFilterUnion
+def test_timestamp_range_to_pb_end_only():
+    # The end value is already at millisecond granularity, so no rounding occurs.
+ end_micros = 12939371000
+ end_millis = end_micros // 1000 * 1000
+ assert end_millis == 12939371000
+ pb_kwargs = {}
+ pb_kwargs["end_timestamp_micros"] = end_millis
+ _timestamp_range_to_pb_helper(pb_kwargs, start=None, end=end_micros)
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
- def test_to_pb(self):
- from google.cloud.bigtable.row_filters import RowSampleFilter
- from google.cloud.bigtable.row_filters import StripValueTransformerFilter
+def test_timestamp_range_filter_constructor():
+ from google.cloud.bigtable.row_filters import TimestampRangeFilter
- row_filter1 = StripValueTransformerFilter(True)
- row_filter1_pb = row_filter1.to_pb()
+ range_ = object()
+ row_filter = TimestampRangeFilter(range_)
+ assert row_filter.range_ is range_
- row_filter2 = RowSampleFilter(0.25)
- row_filter2_pb = row_filter2.to_pb()
- row_filter3 = self._make_one(filters=[row_filter1, row_filter2])
- filter_pb = row_filter3.to_pb()
+def test_timestamp_range_filter___eq__type_differ():
+ from google.cloud.bigtable.row_filters import TimestampRangeFilter
+
+ range_ = object()
+ row_filter1 = TimestampRangeFilter(range_)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
+
+
+def test_timestamp_range_filter___eq__same_value():
+ from google.cloud.bigtable.row_filters import TimestampRangeFilter
+
+ range_ = object()
+ row_filter1 = TimestampRangeFilter(range_)
+ row_filter2 = TimestampRangeFilter(range_)
+ assert row_filter1 == row_filter2
+
+
+def test_timestamp_range_filter___ne__():
+ from google.cloud.bigtable.row_filters import TimestampRangeFilter
+
+ range_ = object()
+ other_range_ = object()
+ row_filter1 = TimestampRangeFilter(range_)
+ row_filter2 = TimestampRangeFilter(other_range_)
+ assert row_filter1 != row_filter2
+
+
+def test_timestamp_range_filter_to_pb():
+ from google.cloud.bigtable.row_filters import TimestampRangeFilter
+ from google.cloud.bigtable.row_filters import TimestampRange
+
+ range_ = TimestampRange()
+ row_filter = TimestampRangeFilter(range_)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(timestamp_range_filter=_TimestampRangePB())
+ assert pb_val == expected_pb
+
+
+def test_column_range_filter_constructor_defaults():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = object()
+ row_filter = ColumnRangeFilter(column_family_id)
+ assert row_filter.column_family_id is column_family_id
+ assert row_filter.start_column is None
+ assert row_filter.end_column is None
+ assert row_filter.inclusive_start
+ assert row_filter.inclusive_end
+
+
+def test_column_range_filter_constructor_explicit():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = object()
+ start_column = object()
+ end_column = object()
+ inclusive_start = object()
+ inclusive_end = object()
+ row_filter = ColumnRangeFilter(
+ column_family_id,
+ start_column=start_column,
+ end_column=end_column,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ assert row_filter.column_family_id is column_family_id
+ assert row_filter.start_column is start_column
+ assert row_filter.end_column is end_column
+ assert row_filter.inclusive_start is inclusive_start
+ assert row_filter.inclusive_end is inclusive_end
+
+
+def test_column_range_filter_constructor_bad_start():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = object()
+ with pytest.raises(ValueError):
+ ColumnRangeFilter(column_family_id, inclusive_start=True)
+
+
+def test_column_range_filter_constructor_bad_end():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = object()
+ with pytest.raises(ValueError):
+ ColumnRangeFilter(column_family_id, inclusive_end=True)
+
+
+def test_column_range_filter___eq__():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = object()
+ start_column = object()
+ end_column = object()
+ inclusive_start = object()
+ inclusive_end = object()
+ row_filter1 = ColumnRangeFilter(
+ column_family_id,
+ start_column=start_column,
+ end_column=end_column,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ row_filter2 = ColumnRangeFilter(
+ column_family_id,
+ start_column=start_column,
+ end_column=end_column,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ assert row_filter1 == row_filter2
+
+
+def test_column_range_filter___eq__type_differ():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = object()
+ row_filter1 = ColumnRangeFilter(column_family_id)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
+
+
+def test_column_range_filter___ne__():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = object()
+ other_column_family_id = object()
+ start_column = object()
+ end_column = object()
+ inclusive_start = object()
+ inclusive_end = object()
+ row_filter1 = ColumnRangeFilter(
+ column_family_id,
+ start_column=start_column,
+ end_column=end_column,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ row_filter2 = ColumnRangeFilter(
+ other_column_family_id,
+ start_column=start_column,
+ end_column=end_column,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ assert row_filter1 != row_filter2
+
+
+def test_column_range_filter_to_pb():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = u"column-family-id"
+ row_filter = ColumnRangeFilter(column_family_id)
+ col_range_pb = _ColumnRangePB(family_name=column_family_id)
+ expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
+ assert row_filter.to_pb() == expected_pb
+
+
+def test_column_range_filter_to_pb_inclusive_start():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = u"column-family-id"
+ column = b"column"
+ row_filter = ColumnRangeFilter(column_family_id, start_column=column)
+ col_range_pb = _ColumnRangePB(
+ family_name=column_family_id, start_qualifier_closed=column
+ )
+ expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
+ assert row_filter.to_pb() == expected_pb
+
+
+def test_column_range_filter_to_pb_exclusive_start():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = u"column-family-id"
+ column = b"column"
+ row_filter = ColumnRangeFilter(
+ column_family_id, start_column=column, inclusive_start=False
+ )
+ col_range_pb = _ColumnRangePB(
+ family_name=column_family_id, start_qualifier_open=column
+ )
+ expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
+ assert row_filter.to_pb() == expected_pb
+
+
+def test_column_range_filter_to_pb_inclusive_end():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = u"column-family-id"
+ column = b"column"
+ row_filter = ColumnRangeFilter(column_family_id, end_column=column)
+ col_range_pb = _ColumnRangePB(
+ family_name=column_family_id, end_qualifier_closed=column
+ )
+ expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
+ assert row_filter.to_pb() == expected_pb
+
+
+def test_column_range_filter_to_pb_exclusive_end():
+ from google.cloud.bigtable.row_filters import ColumnRangeFilter
+
+ column_family_id = u"column-family-id"
+ column = b"column"
+ row_filter = ColumnRangeFilter(
+ column_family_id, end_column=column, inclusive_end=False
+ )
+ col_range_pb = _ColumnRangePB(
+ family_name=column_family_id, end_qualifier_open=column
+ )
+ expected_pb = _RowFilterPB(column_range_filter=col_range_pb)
+ assert row_filter.to_pb() == expected_pb
+
+
+def test_value_regex_filter_to_pb_w_bytes():
+ from google.cloud.bigtable.row_filters import ValueRegexFilter
+
+ value = regex = b"value-regex"
+ row_filter = ValueRegexFilter(value)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(value_regex_filter=regex)
+ assert pb_val == expected_pb
- expected_pb = _RowFilterPB(
- interleave=_RowFilterInterleavePB(filters=[row_filter1_pb, row_filter2_pb])
- )
- self.assertEqual(filter_pb, expected_pb)
- def test_to_pb_nested(self):
- from google.cloud.bigtable.row_filters import CellsRowLimitFilter
- from google.cloud.bigtable.row_filters import RowSampleFilter
- from google.cloud.bigtable.row_filters import StripValueTransformerFilter
+def test_value_regex_filter_to_pb_w_str():
+ from google.cloud.bigtable.row_filters import ValueRegexFilter
- row_filter1 = StripValueTransformerFilter(True)
- row_filter2 = RowSampleFilter(0.25)
+ value = u"value-regex"
+ regex = value.encode("ascii")
+ row_filter = ValueRegexFilter(value)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(value_regex_filter=regex)
+ assert pb_val == expected_pb
- row_filter3 = self._make_one(filters=[row_filter1, row_filter2])
- row_filter3_pb = row_filter3.to_pb()
- row_filter4 = CellsRowLimitFilter(11)
- row_filter4_pb = row_filter4.to_pb()
+def test_exact_value_filter_to_pb_w_bytes():
+ from google.cloud.bigtable.row_filters import ExactValueFilter
- row_filter5 = self._make_one(filters=[row_filter3, row_filter4])
- filter_pb = row_filter5.to_pb()
+ value = regex = b"value-regex"
+ row_filter = ExactValueFilter(value)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(value_regex_filter=regex)
+ assert pb_val == expected_pb
- expected_pb = _RowFilterPB(
- interleave=_RowFilterInterleavePB(filters=[row_filter3_pb, row_filter4_pb])
- )
- self.assertEqual(filter_pb, expected_pb)
+def test_exact_value_filter_to_pb_w_str():
+ from google.cloud.bigtable.row_filters import ExactValueFilter
-class TestConditionalRowFilter(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_filters import ConditionalRowFilter
+ value = u"value-regex"
+ regex = value.encode("ascii")
+ row_filter = ExactValueFilter(value)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(value_regex_filter=regex)
+ assert pb_val == expected_pb
- return ConditionalRowFilter
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
+def test_exact_value_filter_to_pb_w_int():
+ import struct
+ from google.cloud.bigtable.row_filters import ExactValueFilter
- def test_constructor(self):
- base_filter = object()
- true_filter = object()
- false_filter = object()
- cond_filter = self._make_one(
- base_filter, true_filter=true_filter, false_filter=false_filter
- )
- self.assertIs(cond_filter.base_filter, base_filter)
- self.assertIs(cond_filter.true_filter, true_filter)
- self.assertIs(cond_filter.false_filter, false_filter)
-
- def test___eq__(self):
- base_filter = object()
- true_filter = object()
- false_filter = object()
- cond_filter1 = self._make_one(
- base_filter, true_filter=true_filter, false_filter=false_filter
- )
- cond_filter2 = self._make_one(
- base_filter, true_filter=true_filter, false_filter=false_filter
- )
- self.assertEqual(cond_filter1, cond_filter2)
-
- def test___eq__type_differ(self):
- base_filter = object()
- true_filter = object()
- false_filter = object()
- cond_filter1 = self._make_one(
- base_filter, true_filter=true_filter, false_filter=false_filter
- )
- cond_filter2 = object()
- self.assertNotEqual(cond_filter1, cond_filter2)
+ value = 1
+ regex = struct.Struct(">q").pack(value)
+ row_filter = ExactValueFilter(value)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(value_regex_filter=regex)
+ assert pb_val == expected_pb
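+
+
+# The ">q" struct format is big-endian signed 64-bit, so packing 1 yields
+# b"\x00\x00\x00\x00\x00\x00\x00\x01"; the assertion above checks that
+# ExactValueFilter applies the same byte encoding to integer values.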
- def test_to_pb(self):
- from google.cloud.bigtable.row_filters import CellsRowOffsetFilter
- from google.cloud.bigtable.row_filters import RowSampleFilter
- from google.cloud.bigtable.row_filters import StripValueTransformerFilter
- row_filter1 = StripValueTransformerFilter(True)
- row_filter1_pb = row_filter1.to_pb()
+def test_value_range_filter_constructor_defaults():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
- row_filter2 = RowSampleFilter(0.25)
- row_filter2_pb = row_filter2.to_pb()
+ row_filter = ValueRangeFilter()
- row_filter3 = CellsRowOffsetFilter(11)
- row_filter3_pb = row_filter3.to_pb()
+ assert row_filter.start_value is None
+ assert row_filter.end_value is None
+ assert row_filter.inclusive_start
+ assert row_filter.inclusive_end
- row_filter4 = self._make_one(
- row_filter1, true_filter=row_filter2, false_filter=row_filter3
- )
- filter_pb = row_filter4.to_pb()
-
- expected_pb = _RowFilterPB(
- condition=_RowFilterConditionPB(
- predicate_filter=row_filter1_pb,
- true_filter=row_filter2_pb,
- false_filter=row_filter3_pb,
- )
+
+def test_value_range_filter_constructor_explicit():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+
+ start_value = object()
+ end_value = object()
+ inclusive_start = object()
+ inclusive_end = object()
+
+ row_filter = ValueRangeFilter(
+ start_value=start_value,
+ end_value=end_value,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+
+ assert row_filter.start_value is start_value
+ assert row_filter.end_value is end_value
+ assert row_filter.inclusive_start is inclusive_start
+ assert row_filter.inclusive_end is inclusive_end
+
+
+def test_value_range_filter_constructor_w_int_values():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+ import struct
+
+ start_value = 1
+ end_value = 10
+
+ row_filter = ValueRangeFilter(start_value=start_value, end_value=end_value)
+
+ expected_start_value = struct.Struct(">q").pack(start_value)
+ expected_end_value = struct.Struct(">q").pack(end_value)
+
+ assert row_filter.start_value == expected_start_value
+ assert row_filter.end_value == expected_end_value
+ assert row_filter.inclusive_start
+ assert row_filter.inclusive_end
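+
+
+# As in the ExactValueFilter test above, integer bounds are normalized to
+# their big-endian signed 64-bit byte encoding at construction time, so
+# the stored start/end values compare equal to the packed bytes.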
+
+
+def test_value_range_filter_constructor_bad_start():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+
+ with pytest.raises(ValueError):
+ ValueRangeFilter(inclusive_start=True)
+
+
+def test_value_range_filter_constructor_bad_end():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+
+ with pytest.raises(ValueError):
+ ValueRangeFilter(inclusive_end=True)
+
+
+def test_value_range_filter___eq__():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+
+ start_value = object()
+ end_value = object()
+ inclusive_start = object()
+ inclusive_end = object()
+ row_filter1 = ValueRangeFilter(
+ start_value=start_value,
+ end_value=end_value,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ row_filter2 = ValueRangeFilter(
+ start_value=start_value,
+ end_value=end_value,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ assert row_filter1 == row_filter2
+
+
+def test_value_range_filter___eq__type_differ():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+
+ row_filter1 = ValueRangeFilter()
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
+
+
+def test_value_range_filter___ne__():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+
+ start_value = object()
+ other_start_value = object()
+ end_value = object()
+ inclusive_start = object()
+ inclusive_end = object()
+ row_filter1 = ValueRangeFilter(
+ start_value=start_value,
+ end_value=end_value,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ row_filter2 = ValueRangeFilter(
+ start_value=other_start_value,
+ end_value=end_value,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ assert row_filter1 != row_filter2
+
+
+def test_value_range_filter_to_pb():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+
+ row_filter = ValueRangeFilter()
+ expected_pb = _RowFilterPB(value_range_filter=_ValueRangePB())
+ assert row_filter.to_pb() == expected_pb
+
+
+def test_value_range_filter_to_pb_inclusive_start():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+
+ value = b"some-value"
+ row_filter = ValueRangeFilter(start_value=value)
+ val_range_pb = _ValueRangePB(start_value_closed=value)
+ expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
+ assert row_filter.to_pb() == expected_pb
+
+
+def test_value_range_filter_to_pb_exclusive_start():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+
+ value = b"some-value"
+ row_filter = ValueRangeFilter(start_value=value, inclusive_start=False)
+ val_range_pb = _ValueRangePB(start_value_open=value)
+ expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
+ assert row_filter.to_pb() == expected_pb
+
+
+def test_value_range_filter_to_pb_inclusive_end():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+
+ value = b"some-value"
+ row_filter = ValueRangeFilter(end_value=value)
+ val_range_pb = _ValueRangePB(end_value_closed=value)
+ expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
+ assert row_filter.to_pb() == expected_pb
+
+
+def test_value_range_filter_to_pb_exclusive_end():
+ from google.cloud.bigtable.row_filters import ValueRangeFilter
+
+ value = b"some-value"
+ row_filter = ValueRangeFilter(end_value=value, inclusive_end=False)
+ val_range_pb = _ValueRangePB(end_value_open=value)
+ expected_pb = _RowFilterPB(value_range_filter=val_range_pb)
+ assert row_filter.to_pb() == expected_pb
+
+
+def test_cell_count_constructor():
+ from google.cloud.bigtable.row_filters import _CellCountFilter
+
+ num_cells = object()
+ row_filter = _CellCountFilter(num_cells)
+ assert row_filter.num_cells is num_cells
+
+
+def test_cell_count___eq__type_differ():
+ from google.cloud.bigtable.row_filters import _CellCountFilter
+
+ num_cells = object()
+ row_filter1 = _CellCountFilter(num_cells)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
+
+
+def test_cell_count___eq__same_value():
+ from google.cloud.bigtable.row_filters import _CellCountFilter
+
+ num_cells = object()
+ row_filter1 = _CellCountFilter(num_cells)
+ row_filter2 = _CellCountFilter(num_cells)
+ assert row_filter1 == row_filter2
+
+
+def test_cell_count___ne__same_value():
+ from google.cloud.bigtable.row_filters import _CellCountFilter
+
+ num_cells = object()
+ row_filter1 = _CellCountFilter(num_cells)
+ row_filter2 = _CellCountFilter(num_cells)
+ assert not (row_filter1 != row_filter2)
+
+
+def test_cells_row_offset_filter_to_pb():
+ from google.cloud.bigtable.row_filters import CellsRowOffsetFilter
+
+ num_cells = 76
+ row_filter = CellsRowOffsetFilter(num_cells)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(cells_per_row_offset_filter=num_cells)
+ assert pb_val == expected_pb
+
+
+def test_cells_row_limit_filter_to_pb():
+ from google.cloud.bigtable.row_filters import CellsRowLimitFilter
+
+ num_cells = 189
+ row_filter = CellsRowLimitFilter(num_cells)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(cells_per_row_limit_filter=num_cells)
+ assert pb_val == expected_pb
+
+
+def test_cells_column_limit_filter_to_pb():
+ from google.cloud.bigtable.row_filters import CellsColumnLimitFilter
+
+ num_cells = 10
+ row_filter = CellsColumnLimitFilter(num_cells)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(cells_per_column_limit_filter=num_cells)
+ assert pb_val == expected_pb
+
+
+def test_strip_value_transformer_filter_to_pb():
+ from google.cloud.bigtable.row_filters import StripValueTransformerFilter
+
+ flag = True
+ row_filter = StripValueTransformerFilter(flag)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(strip_value_transformer=flag)
+ assert pb_val == expected_pb
+
+
+def test_apply_label_filter_constructor():
+ from google.cloud.bigtable.row_filters import ApplyLabelFilter
+
+ label = object()
+ row_filter = ApplyLabelFilter(label)
+ assert row_filter.label is label
+
+
+def test_apply_label_filter___eq__type_differ():
+ from google.cloud.bigtable.row_filters import ApplyLabelFilter
+
+ label = object()
+ row_filter1 = ApplyLabelFilter(label)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
+
+
+def test_apply_label_filter___eq__same_value():
+ from google.cloud.bigtable.row_filters import ApplyLabelFilter
+
+ label = object()
+ row_filter1 = ApplyLabelFilter(label)
+ row_filter2 = ApplyLabelFilter(label)
+ assert row_filter1 == row_filter2
+
+
+def test_apply_label_filter___ne__():
+ from google.cloud.bigtable.row_filters import ApplyLabelFilter
+
+ label = object()
+ other_label = object()
+ row_filter1 = ApplyLabelFilter(label)
+ row_filter2 = ApplyLabelFilter(other_label)
+ assert row_filter1 != row_filter2
+
+
+def test_apply_label_filter_to_pb():
+ from google.cloud.bigtable.row_filters import ApplyLabelFilter
+
+ label = u"label"
+ row_filter = ApplyLabelFilter(label)
+ pb_val = row_filter.to_pb()
+ expected_pb = _RowFilterPB(apply_label_transformer=label)
+ assert pb_val == expected_pb
+
+
+def test_filter_combination_constructor_defaults():
+ from google.cloud.bigtable.row_filters import _FilterCombination
+
+ row_filter = _FilterCombination()
+ assert row_filter.filters == []
+
+
+def test_filter_combination_constructor_explicit():
+ from google.cloud.bigtable.row_filters import _FilterCombination
+
+ filters = object()
+ row_filter = _FilterCombination(filters=filters)
+ assert row_filter.filters is filters
+
+
+def test_filter_combination___eq__():
+ from google.cloud.bigtable.row_filters import _FilterCombination
+
+ filters = object()
+ row_filter1 = _FilterCombination(filters=filters)
+ row_filter2 = _FilterCombination(filters=filters)
+ assert row_filter1 == row_filter2
+
+
+def test_filter_combination___eq__type_differ():
+ from google.cloud.bigtable.row_filters import _FilterCombination
+
+ filters = object()
+ row_filter1 = _FilterCombination(filters=filters)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
+
+
+def test_filter_combination___ne__():
+ from google.cloud.bigtable.row_filters import _FilterCombination
+
+ filters = object()
+ other_filters = object()
+ row_filter1 = _FilterCombination(filters=filters)
+ row_filter2 = _FilterCombination(filters=other_filters)
+ assert row_filter1 != row_filter2
+
+
+def test_row_filter_chain_to_pb():
+ from google.cloud.bigtable.row_filters import RowFilterChain
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ from google.cloud.bigtable.row_filters import StripValueTransformerFilter
+
+ row_filter1 = StripValueTransformerFilter(True)
+ row_filter1_pb = row_filter1.to_pb()
+
+ row_filter2 = RowSampleFilter(0.25)
+ row_filter2_pb = row_filter2.to_pb()
+
+ row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2])
+ filter_pb = row_filter3.to_pb()
+
+ expected_pb = _RowFilterPB(
+ chain=_RowFilterChainPB(filters=[row_filter1_pb, row_filter2_pb])
+ )
+ assert filter_pb == expected_pb
+
+
+def test_row_filter_chain_to_pb_nested():
+ from google.cloud.bigtable.row_filters import CellsRowLimitFilter
+ from google.cloud.bigtable.row_filters import RowFilterChain
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ from google.cloud.bigtable.row_filters import StripValueTransformerFilter
+
+ row_filter1 = StripValueTransformerFilter(True)
+ row_filter2 = RowSampleFilter(0.25)
+
+ row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2])
+ row_filter3_pb = row_filter3.to_pb()
+
+ row_filter4 = CellsRowLimitFilter(11)
+ row_filter4_pb = row_filter4.to_pb()
+
+ row_filter5 = RowFilterChain(filters=[row_filter3, row_filter4])
+ filter_pb = row_filter5.to_pb()
+
+ expected_pb = _RowFilterPB(
+ chain=_RowFilterChainPB(filters=[row_filter3_pb, row_filter4_pb])
+ )
+ assert filter_pb == expected_pb
+
+
+def test_row_filter_union_to_pb():
+ from google.cloud.bigtable.row_filters import RowFilterUnion
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ from google.cloud.bigtable.row_filters import StripValueTransformerFilter
+
+ row_filter1 = StripValueTransformerFilter(True)
+ row_filter1_pb = row_filter1.to_pb()
+
+ row_filter2 = RowSampleFilter(0.25)
+ row_filter2_pb = row_filter2.to_pb()
+
+ row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2])
+ filter_pb = row_filter3.to_pb()
+
+ expected_pb = _RowFilterPB(
+ interleave=_RowFilterInterleavePB(filters=[row_filter1_pb, row_filter2_pb])
+ )
+ assert filter_pb == expected_pb
+
+
+def test_row_filter_union_to_pb_nested():
+ from google.cloud.bigtable.row_filters import CellsRowLimitFilter
+ from google.cloud.bigtable.row_filters import RowFilterUnion
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ from google.cloud.bigtable.row_filters import StripValueTransformerFilter
+
+ row_filter1 = StripValueTransformerFilter(True)
+ row_filter2 = RowSampleFilter(0.25)
+
+ row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2])
+ row_filter3_pb = row_filter3.to_pb()
+
+ row_filter4 = CellsRowLimitFilter(11)
+ row_filter4_pb = row_filter4.to_pb()
+
+ row_filter5 = RowFilterUnion(filters=[row_filter3, row_filter4])
+ filter_pb = row_filter5.to_pb()
+
+ expected_pb = _RowFilterPB(
+ interleave=_RowFilterInterleavePB(filters=[row_filter3_pb, row_filter4_pb])
+ )
+ assert filter_pb == expected_pb
+
+
+def test_conditional_row_filter_constructor():
+ from google.cloud.bigtable.row_filters import ConditionalRowFilter
+
+ base_filter = object()
+ true_filter = object()
+ false_filter = object()
+ cond_filter = ConditionalRowFilter(
+ base_filter, true_filter=true_filter, false_filter=false_filter
+ )
+ assert cond_filter.base_filter is base_filter
+ assert cond_filter.true_filter is true_filter
+ assert cond_filter.false_filter is false_filter
+
+
+def test_conditional_row_filter___eq__():
+ from google.cloud.bigtable.row_filters import ConditionalRowFilter
+
+ base_filter = object()
+ true_filter = object()
+ false_filter = object()
+ cond_filter1 = ConditionalRowFilter(
+ base_filter, true_filter=true_filter, false_filter=false_filter
+ )
+ cond_filter2 = ConditionalRowFilter(
+ base_filter, true_filter=true_filter, false_filter=false_filter
+ )
+ assert cond_filter1 == cond_filter2
+
+
+def test_conditional_row_filter___eq__type_differ():
+ from google.cloud.bigtable.row_filters import ConditionalRowFilter
+
+ base_filter = object()
+ true_filter = object()
+ false_filter = object()
+ cond_filter1 = ConditionalRowFilter(
+ base_filter, true_filter=true_filter, false_filter=false_filter
+ )
+ cond_filter2 = object()
+ assert not (cond_filter1 == cond_filter2)
+
+
+def test_conditional_row_filter___ne__():
+ from google.cloud.bigtable.row_filters import ConditionalRowFilter
+
+ base_filter = object()
+ other_base_filter = object()
+ true_filter = object()
+ false_filter = object()
+ cond_filter1 = ConditionalRowFilter(
+ base_filter, true_filter=true_filter, false_filter=false_filter
+ )
+ cond_filter2 = ConditionalRowFilter(
+ other_base_filter, true_filter=true_filter, false_filter=false_filter
+ )
+ assert cond_filter1 != cond_filter2
+
+
+def test_conditional_row_filter_to_pb():
+ from google.cloud.bigtable.row_filters import ConditionalRowFilter
+ from google.cloud.bigtable.row_filters import CellsRowOffsetFilter
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ from google.cloud.bigtable.row_filters import StripValueTransformerFilter
+
+ row_filter1 = StripValueTransformerFilter(True)
+ row_filter1_pb = row_filter1.to_pb()
+
+ row_filter2 = RowSampleFilter(0.25)
+ row_filter2_pb = row_filter2.to_pb()
+
+ row_filter3 = CellsRowOffsetFilter(11)
+ row_filter3_pb = row_filter3.to_pb()
+
+ row_filter4 = ConditionalRowFilter(
+ row_filter1, true_filter=row_filter2, false_filter=row_filter3
+ )
+ filter_pb = row_filter4.to_pb()
+
+ expected_pb = _RowFilterPB(
+ condition=_RowFilterConditionPB(
+ predicate_filter=row_filter1_pb,
+ true_filter=row_filter2_pb,
+ false_filter=row_filter3_pb,
)
- self.assertEqual(filter_pb, expected_pb)
+ )
+ assert filter_pb == expected_pb
- def test_to_pb_true_only(self):
- from google.cloud.bigtable.row_filters import RowSampleFilter
- from google.cloud.bigtable.row_filters import StripValueTransformerFilter
- row_filter1 = StripValueTransformerFilter(True)
- row_filter1_pb = row_filter1.to_pb()
+def test_conditional_row_filter_to_pb_true_only():
+ from google.cloud.bigtable.row_filters import ConditionalRowFilter
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ from google.cloud.bigtable.row_filters import StripValueTransformerFilter
- row_filter2 = RowSampleFilter(0.25)
- row_filter2_pb = row_filter2.to_pb()
+ row_filter1 = StripValueTransformerFilter(True)
+ row_filter1_pb = row_filter1.to_pb()
- row_filter3 = self._make_one(row_filter1, true_filter=row_filter2)
- filter_pb = row_filter3.to_pb()
+ row_filter2 = RowSampleFilter(0.25)
+ row_filter2_pb = row_filter2.to_pb()
- expected_pb = _RowFilterPB(
- condition=_RowFilterConditionPB(
- predicate_filter=row_filter1_pb, true_filter=row_filter2_pb
- )
+ row_filter3 = ConditionalRowFilter(row_filter1, true_filter=row_filter2)
+ filter_pb = row_filter3.to_pb()
+
+ expected_pb = _RowFilterPB(
+ condition=_RowFilterConditionPB(
+ predicate_filter=row_filter1_pb, true_filter=row_filter2_pb
)
- self.assertEqual(filter_pb, expected_pb)
+ )
+ assert filter_pb == expected_pb
+
- def test_to_pb_false_only(self):
- from google.cloud.bigtable.row_filters import RowSampleFilter
- from google.cloud.bigtable.row_filters import StripValueTransformerFilter
+def test_conditional_row_filter_to_pb_false_only():
+ from google.cloud.bigtable.row_filters import ConditionalRowFilter
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+ from google.cloud.bigtable.row_filters import StripValueTransformerFilter
- row_filter1 = StripValueTransformerFilter(True)
- row_filter1_pb = row_filter1.to_pb()
+ row_filter1 = StripValueTransformerFilter(True)
+ row_filter1_pb = row_filter1.to_pb()
- row_filter2 = RowSampleFilter(0.25)
- row_filter2_pb = row_filter2.to_pb()
+ row_filter2 = RowSampleFilter(0.25)
+ row_filter2_pb = row_filter2.to_pb()
- row_filter3 = self._make_one(row_filter1, false_filter=row_filter2)
- filter_pb = row_filter3.to_pb()
+ row_filter3 = ConditionalRowFilter(row_filter1, false_filter=row_filter2)
+ filter_pb = row_filter3.to_pb()
- expected_pb = _RowFilterPB(
- condition=_RowFilterConditionPB(
- predicate_filter=row_filter1_pb, false_filter=row_filter2_pb
- )
+ expected_pb = _RowFilterPB(
+ condition=_RowFilterConditionPB(
+ predicate_filter=row_filter1_pb, false_filter=row_filter2_pb
)
- self.assertEqual(filter_pb, expected_pb)
+ )
+ assert filter_pb == expected_pb
def _ColumnRangePB(*args, **kw):
diff --git a/tests/unit/test_row_set.py b/tests/unit/test_row_set.py
index c1fa4ca87..1a33be720 100644
--- a/tests/unit/test_row_set.py
+++ b/tests/unit/test_row_set.py
@@ -13,260 +13,308 @@
# limitations under the License.
-import unittest
-from google.cloud.bigtable.row_set import RowRange
-from google.cloud._helpers import _to_bytes
+def test_row_set_constructor():
+ from google.cloud.bigtable.row_set import RowSet
+ row_set = RowSet()
+ assert [] == row_set.row_keys
+ assert [] == row_set.row_ranges
-class TestRowSet(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_set import RowSet
- return RowSet
+def test_row_set__eq__():
+ from google.cloud.bigtable.row_set import RowRange
+ from google.cloud.bigtable.row_set import RowSet
- def _make_one(self):
- return self._get_target_class()()
+ row_key1 = b"row_key1"
+ row_key2 = b"row_key1"
+ row_range1 = RowRange(b"row_key4", b"row_key9")
+ row_range2 = RowRange(b"row_key4", b"row_key9")
- def test_constructor(self):
- row_set = self._make_one()
- self.assertEqual([], row_set.row_keys)
- self.assertEqual([], row_set.row_ranges)
+ row_set1 = RowSet()
+ row_set2 = RowSet()
- def test__eq__(self):
- row_key1 = b"row_key1"
- row_key2 = b"row_key1"
- row_range1 = RowRange(b"row_key4", b"row_key9")
- row_range2 = RowRange(b"row_key4", b"row_key9")
+ row_set1.add_row_key(row_key1)
+ row_set2.add_row_key(row_key2)
+ row_set1.add_row_range(row_range1)
+ row_set2.add_row_range(row_range2)
- row_set1 = self._make_one()
- row_set2 = self._make_one()
+ assert row_set1 == row_set2
- row_set1.add_row_key(row_key1)
- row_set2.add_row_key(row_key2)
- row_set1.add_row_range(row_range1)
- row_set2.add_row_range(row_range2)
- self.assertEqual(row_set1, row_set2)
+def test_row_set__eq__type_differ():
+ from google.cloud.bigtable.row_set import RowSet
- def test__eq__type_differ(self):
- row_set1 = self._make_one()
- row_set2 = object()
- self.assertNotEqual(row_set1, row_set2)
+ row_set1 = RowSet()
+ row_set2 = object()
+ assert not (row_set1 == row_set2)
- def test__eq__len_row_keys_differ(self):
- row_key1 = b"row_key1"
- row_key2 = b"row_key1"
- row_set1 = self._make_one()
- row_set2 = self._make_one()
-
- row_set1.add_row_key(row_key1)
- row_set1.add_row_key(row_key2)
- row_set2.add_row_key(row_key2)
-
- self.assertNotEqual(row_set1, row_set2)
-
- def test__eq__len_row_ranges_differ(self):
- row_range1 = RowRange(b"row_key4", b"row_key9")
- row_range2 = RowRange(b"row_key4", b"row_key9")
-
- row_set1 = self._make_one()
- row_set2 = self._make_one()
-
- row_set1.add_row_range(row_range1)
- row_set1.add_row_range(row_range2)
- row_set2.add_row_range(row_range2)
-
- self.assertNotEqual(row_set1, row_set2)
-
- def test__eq__row_keys_differ(self):
- row_set1 = self._make_one()
- row_set2 = self._make_one()
-
- row_set1.add_row_key(b"row_key1")
- row_set1.add_row_key(b"row_key2")
- row_set1.add_row_key(b"row_key3")
- row_set2.add_row_key(b"row_key1")
- row_set2.add_row_key(b"row_key2")
- row_set2.add_row_key(b"row_key4")
+def test_row_set__eq__len_row_keys_differ():
+ from google.cloud.bigtable.row_set import RowSet
- self.assertNotEqual(row_set1, row_set2)
-
- def test__eq__row_ranges_differ(self):
- row_range1 = RowRange(b"row_key4", b"row_key9")
- row_range2 = RowRange(b"row_key14", b"row_key19")
- row_range3 = RowRange(b"row_key24", b"row_key29")
+ row_key1 = b"row_key1"
+ row_key2 = b"row_key1"
- row_set1 = self._make_one()
- row_set2 = self._make_one()
+ row_set1 = RowSet()
+ row_set2 = RowSet()
- row_set1.add_row_range(row_range1)
- row_set1.add_row_range(row_range2)
- row_set1.add_row_range(row_range3)
- row_set2.add_row_range(row_range1)
- row_set2.add_row_range(row_range2)
-
- self.assertNotEqual(row_set1, row_set2)
-
- def test__ne__(self):
- row_key1 = b"row_key1"
- row_key2 = b"row_key1"
- row_range1 = RowRange(b"row_key4", b"row_key9")
- row_range2 = RowRange(b"row_key5", b"row_key9")
-
- row_set1 = self._make_one()
- row_set2 = self._make_one()
-
- row_set1.add_row_key(row_key1)
- row_set2.add_row_key(row_key2)
- row_set1.add_row_range(row_range1)
- row_set2.add_row_range(row_range2)
-
- self.assertNotEqual(row_set1, row_set2)
-
- def test__ne__same_value(self):
- row_key1 = b"row_key1"
- row_key2 = b"row_key1"
- row_range1 = RowRange(b"row_key4", b"row_key9")
- row_range2 = RowRange(b"row_key4", b"row_key9")
-
- row_set1 = self._make_one()
- row_set2 = self._make_one()
-
- row_set1.add_row_key(row_key1)
- row_set2.add_row_key(row_key2)
- row_set1.add_row_range(row_range1)
- row_set2.add_row_range(row_range2)
-
- comparison_val = row_set1 != row_set2
- self.assertFalse(comparison_val)
-
- def test_add_row_key(self):
- row_set = self._make_one()
- row_set.add_row_key("row_key1")
- row_set.add_row_key("row_key2")
- self.assertEqual(["row_key1", "row_key2"], row_set.row_keys)
-
- def test_add_row_range(self):
- row_set = self._make_one()
- row_range1 = RowRange(b"row_key1", b"row_key9")
- row_range2 = RowRange(b"row_key21", b"row_key29")
- row_set.add_row_range(row_range1)
- row_set.add_row_range(row_range2)
- expected = [row_range1, row_range2]
- self.assertEqual(expected, row_set.row_ranges)
-
- def test_add_row_range_from_keys(self):
- row_set = self._make_one()
- row_set.add_row_range_from_keys(
- start_key=b"row_key1",
- end_key=b"row_key9",
- start_inclusive=False,
- end_inclusive=True,
- )
- self.assertEqual(row_set.row_ranges[0].end_key, b"row_key9")
-
- def test_add_row_range_with_prefix(self):
- row_set = self._make_one()
- row_set.add_row_range_with_prefix("row")
- self.assertEqual(row_set.row_ranges[0].end_key, b"rox")
-
- def test__update_message_request(self):
- row_set = self._make_one()
- table_name = "table_name"
- row_set.add_row_key("row_key1")
- row_range1 = RowRange(b"row_key21", b"row_key29")
- row_set.add_row_range(row_range1)
-
- request = _ReadRowsRequestPB(table_name=table_name)
- row_set._update_message_request(request)
-
- expected_request = _ReadRowsRequestPB(table_name=table_name)
- expected_request.rows.row_keys.append(_to_bytes("row_key1"))
-
- expected_request.rows.row_ranges.append(row_range1.get_range_kwargs())
-
- self.assertEqual(request, expected_request)
-
-
-class TestRowRange(unittest.TestCase):
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.row_set import RowRange
-
- return RowRange
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- def test_constructor(self):
- start_key = "row_key1"
- end_key = "row_key9"
- row_range = self._make_one(start_key, end_key)
- self.assertEqual(start_key, row_range.start_key)
- self.assertEqual(end_key, row_range.end_key)
- self.assertTrue(row_range.start_inclusive)
- self.assertFalse(row_range.end_inclusive)
-
- def test___hash__set_equality(self):
- row_range1 = self._make_one("row_key1", "row_key9")
- row_range2 = self._make_one("row_key1", "row_key9")
- set_one = {row_range1, row_range2}
- set_two = {row_range1, row_range2}
- self.assertEqual(set_one, set_two)
-
- def test___hash__not_equals(self):
- row_range1 = self._make_one("row_key1", "row_key9")
- row_range2 = self._make_one("row_key1", "row_key19")
- set_one = {row_range1}
- set_two = {row_range2}
- self.assertNotEqual(set_one, set_two)
-
- def test__eq__(self):
- start_key = b"row_key1"
- end_key = b"row_key9"
- row_range1 = self._make_one(start_key, end_key, True, False)
- row_range2 = self._make_one(start_key, end_key, True, False)
- self.assertEqual(row_range1, row_range2)
-
- def test___eq__type_differ(self):
- start_key = b"row_key1"
- end_key = b"row_key9"
- row_range1 = self._make_one(start_key, end_key, True, False)
- row_range2 = object()
- self.assertNotEqual(row_range1, row_range2)
-
- def test__ne__(self):
- start_key = b"row_key1"
- end_key = b"row_key9"
- row_range1 = self._make_one(start_key, end_key, True, False)
- row_range2 = self._make_one(start_key, end_key, False, True)
- self.assertNotEqual(row_range1, row_range2)
-
- def test__ne__same_value(self):
- start_key = b"row_key1"
- end_key = b"row_key9"
- row_range1 = self._make_one(start_key, end_key, True, False)
- row_range2 = self._make_one(start_key, end_key, True, False)
- comparison_val = row_range1 != row_range2
- self.assertFalse(comparison_val)
-
- def test_get_range_kwargs_closed_open(self):
- start_key = b"row_key1"
- end_key = b"row_key9"
- expected_result = {"start_key_closed": start_key, "end_key_open": end_key}
- row_range = self._make_one(start_key, end_key)
- actual_result = row_range.get_range_kwargs()
- self.assertEqual(expected_result, actual_result)
-
- def test_get_range_kwargs_open_closed(self):
- start_key = b"row_key1"
- end_key = b"row_key9"
- expected_result = {"start_key_open": start_key, "end_key_closed": end_key}
- row_range = self._make_one(start_key, end_key, False, True)
- actual_result = row_range.get_range_kwargs()
- self.assertEqual(expected_result, actual_result)
+ row_set1.add_row_key(row_key1)
+ row_set1.add_row_key(row_key2)
+ row_set2.add_row_key(row_key2)
+
+ assert not (row_set1 == row_set2)
+
+
+def test_row_set__eq__len_row_ranges_differ():
+ from google.cloud.bigtable.row_set import RowRange
+ from google.cloud.bigtable.row_set import RowSet
+
+ row_range1 = RowRange(b"row_key4", b"row_key9")
+ row_range2 = RowRange(b"row_key4", b"row_key9")
+
+ row_set1 = RowSet()
+ row_set2 = RowSet()
+
+ row_set1.add_row_range(row_range1)
+ row_set1.add_row_range(row_range2)
+ row_set2.add_row_range(row_range2)
+
+ assert not (row_set1 == row_set2)
+
+
+def test_row_set__eq__row_keys_differ():
+ from google.cloud.bigtable.row_set import RowSet
+
+ row_set1 = RowSet()
+ row_set2 = RowSet()
+
+ row_set1.add_row_key(b"row_key1")
+ row_set1.add_row_key(b"row_key2")
+ row_set1.add_row_key(b"row_key3")
+ row_set2.add_row_key(b"row_key1")
+ row_set2.add_row_key(b"row_key2")
+ row_set2.add_row_key(b"row_key4")
+
+ assert not (row_set1 == row_set2)
+
+
+def test_row_set__eq__row_ranges_differ():
+ from google.cloud.bigtable.row_set import RowRange
+ from google.cloud.bigtable.row_set import RowSet
+
+ row_range1 = RowRange(b"row_key4", b"row_key9")
+ row_range2 = RowRange(b"row_key14", b"row_key19")
+ row_range3 = RowRange(b"row_key24", b"row_key29")
+
+ row_set1 = RowSet()
+ row_set2 = RowSet()
+
+ row_set1.add_row_range(row_range1)
+ row_set1.add_row_range(row_range2)
+ row_set1.add_row_range(row_range3)
+ row_set2.add_row_range(row_range1)
+ row_set2.add_row_range(row_range2)
+
+ assert not (row_set1 == row_set2)
+
+
+def test_row_set__ne__():
+ from google.cloud.bigtable.row_set import RowRange
+ from google.cloud.bigtable.row_set import RowSet
+
+ row_key1 = b"row_key1"
+ row_key2 = b"row_key1"
+ row_range1 = RowRange(b"row_key4", b"row_key9")
+ row_range2 = RowRange(b"row_key5", b"row_key9")
+
+ row_set1 = RowSet()
+ row_set2 = RowSet()
+
+ row_set1.add_row_key(row_key1)
+ row_set2.add_row_key(row_key2)
+ row_set1.add_row_range(row_range1)
+ row_set2.add_row_range(row_range2)
+
+ assert row_set1 != row_set2
+
+
+def test_row_set__ne__same_value():
+ from google.cloud.bigtable.row_set import RowRange
+ from google.cloud.bigtable.row_set import RowSet
+
+ row_key1 = b"row_key1"
+ row_key2 = b"row_key1"
+ row_range1 = RowRange(b"row_key4", b"row_key9")
+ row_range2 = RowRange(b"row_key4", b"row_key9")
+
+ row_set1 = RowSet()
+ row_set2 = RowSet()
+
+ row_set1.add_row_key(row_key1)
+ row_set2.add_row_key(row_key2)
+ row_set1.add_row_range(row_range1)
+ row_set2.add_row_range(row_range2)
+
+ assert not (row_set1 != row_set2)
+
+
+def test_row_set_add_row_key():
+ from google.cloud.bigtable.row_set import RowSet
+
+ row_set = RowSet()
+ row_set.add_row_key("row_key1")
+ row_set.add_row_key("row_key2")
+ assert ["row_key1" == "row_key2"], row_set.row_keys
+
+
+def test_row_set_add_row_range():
+ from google.cloud.bigtable.row_set import RowRange
+ from google.cloud.bigtable.row_set import RowSet
+
+ row_set = RowSet()
+ row_range1 = RowRange(b"row_key1", b"row_key9")
+ row_range2 = RowRange(b"row_key21", b"row_key29")
+ row_set.add_row_range(row_range1)
+ row_set.add_row_range(row_range2)
+ expected = [row_range1, row_range2]
+ assert expected == row_set.row_ranges
+
+
+def test_row_set_add_row_range_from_keys():
+ from google.cloud.bigtable.row_set import RowSet
+
+ row_set = RowSet()
+ row_set.add_row_range_from_keys(
+ start_key=b"row_key1",
+ end_key=b"row_key9",
+ start_inclusive=False,
+ end_inclusive=True,
+ )
+ assert row_set.row_ranges[0].end_key == b"row_key9"
+
+
+def test_row_set_add_row_range_with_prefix():
+ from google.cloud.bigtable.row_set import RowSet
+
+ row_set = RowSet()
+ row_set.add_row_range_with_prefix("row")
+ assert row_set.row_ranges[0].end_key == b"rox"
+
+
+def test_row_set__update_message_request():
+ from google.cloud._helpers import _to_bytes
+ from google.cloud.bigtable.row_set import RowRange
+ from google.cloud.bigtable.row_set import RowSet
+
+ row_set = RowSet()
+ table_name = "table_name"
+ row_set.add_row_key("row_key1")
+ row_range1 = RowRange(b"row_key21", b"row_key29")
+ row_set.add_row_range(row_range1)
+
+ request = _ReadRowsRequestPB(table_name=table_name)
+ row_set._update_message_request(request)
+
+ expected_request = _ReadRowsRequestPB(table_name=table_name)
+ expected_request.rows.row_keys.append(_to_bytes("row_key1"))
+
+ expected_request.rows.row_ranges.append(row_range1.get_range_kwargs())
+
+ assert request == expected_request
+
+
+def test_row_range_constructor():
+ from google.cloud.bigtable.row_set import RowRange
+
+ start_key = "row_key1"
+ end_key = "row_key9"
+ row_range = RowRange(start_key, end_key)
+ assert start_key == row_range.start_key
+ assert end_key == row_range.end_key
+ assert row_range.start_inclusive
+ assert not row_range.end_inclusive
+
+
+def test_row_range___hash__set_equality():
+ from google.cloud.bigtable.row_set import RowRange
+
+ row_range1 = RowRange("row_key1", "row_key9")
+ row_range2 = RowRange("row_key1", "row_key9")
+ set_one = {row_range1, row_range2}
+ set_two = {row_range1, row_range2}
+ assert set_one == set_two
+
+
+def test_row_range___hash__not_equals():
+ from google.cloud.bigtable.row_set import RowRange
+
+ row_range1 = RowRange("row_key1", "row_key9")
+ row_range2 = RowRange("row_key1", "row_key19")
+ set_one = {row_range1}
+ set_two = {row_range2}
+ assert set_one != set_two
+
+
+def test_row_range__eq__():
+ from google.cloud.bigtable.row_set import RowRange
+
+ start_key = b"row_key1"
+ end_key = b"row_key9"
+ row_range1 = RowRange(start_key, end_key, True, False)
+ row_range2 = RowRange(start_key, end_key, True, False)
+ assert row_range1 == row_range2
+
+
+def test_row_range___eq__type_differ():
+ from google.cloud.bigtable.row_set import RowRange
+
+ start_key = b"row_key1"
+ end_key = b"row_key9"
+ row_range1 = RowRange(start_key, end_key, True, False)
+ row_range2 = object()
+    assert not (row_range1 == row_range2)
+
+
+def test_row_range__ne__():
+ from google.cloud.bigtable.row_set import RowRange
+
+ start_key = b"row_key1"
+ end_key = b"row_key9"
+ row_range1 = RowRange(start_key, end_key, True, False)
+ row_range2 = RowRange(start_key, end_key, False, True)
+ assert row_range1 != row_range2
+
+
+def test_row_range__ne__same_value():
+ from google.cloud.bigtable.row_set import RowRange
+
+ start_key = b"row_key1"
+ end_key = b"row_key9"
+ row_range1 = RowRange(start_key, end_key, True, False)
+ row_range2 = RowRange(start_key, end_key, True, False)
+ assert not (row_range1 != row_range2)
+
+
+def test_row_range_get_range_kwargs_closed_open():
+ from google.cloud.bigtable.row_set import RowRange
+
+ start_key = b"row_key1"
+ end_key = b"row_key9"
+ expected_result = {"start_key_closed": start_key, "end_key_open": end_key}
+ row_range = RowRange(start_key, end_key)
+ actual_result = row_range.get_range_kwargs()
+ assert expected_result == actual_result
+
+
+def test_row_range_get_range_kwargs_open_closed():
+ from google.cloud.bigtable.row_set import RowRange
+
+ start_key = b"row_key1"
+ end_key = b"row_key9"
+ expected_result = {"start_key_open": start_key, "end_key_closed": end_key}
+ row_range = RowRange(start_key, end_key, False, True)
+ actual_result = row_range.get_range_kwargs()
+ assert expected_result == actual_result
def _ReadRowsRequestPB(*args, **kw):
diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py
index bb6cca6a7..eacde3c3e 100644
--- a/tests/unit/test_table.py
+++ b/tests/unit/test_table.py
@@ -13,2154 +13,1996 @@
# limitations under the License.
-import unittest
import warnings
import mock
+import pytest
+from grpc import StatusCode
-from ._testing import _make_credentials
from google.api_core.exceptions import DeadlineExceeded
+from ._testing import _make_credentials
+PROJECT_ID = "project-id"
+INSTANCE_ID = "instance-id"
+INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
+CLUSTER_ID = "cluster-id"
+CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID
+TABLE_ID = "table-id"
+TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
+BACKUP_ID = "backup-id"
+BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID
+ROW_KEY = b"row-key"
+ROW_KEY_1 = b"row-key-1"
+ROW_KEY_2 = b"row-key-2"
+ROW_KEY_3 = b"row-key-3"
+FAMILY_NAME = "family"
+QUALIFIER = b"qualifier"
+TIMESTAMP_MICROS = 100
+VALUE = b"value"
+
+# RPC Status Codes
+SUCCESS = StatusCode.OK.value[0]
+RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0]
+RETRYABLE_2 = StatusCode.ABORTED.value[0]
+RETRYABLE_3 = StatusCode.UNAVAILABLE.value[0]
+RETRYABLES = (RETRYABLE_1, RETRYABLE_2, RETRYABLE_3)
+NON_RETRYABLE = StatusCode.CANCELLED.value[0]
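+# grpc.StatusCode members are (integer, description) pairs, so .value[0]
+# extracts the bare integer code (OK is 0, DEADLINE_EXCEEDED is 4).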
+
+
+@mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3)
+def test__compile_mutation_entries_w_too_many_mutations():
+ from google.cloud.bigtable.row import DirectRow
+ from google.cloud.bigtable.table import TooManyMutationsError
+ from google.cloud.bigtable.table import _compile_mutation_entries
+
+ table = mock.Mock(name="table", spec=["name"])
+ table.name = "table"
+ rows = [
+ DirectRow(row_key=b"row_key", table=table),
+ DirectRow(row_key=b"row_key_2", table=table),
+ ]
+ rows[0].set_cell("cf1", b"c1", 1)
+ rows[0].set_cell("cf1", b"c1", 2)
+ rows[1].set_cell("cf1", b"c1", 3)
+ rows[1].set_cell("cf1", b"c1", 4)
+
+ with pytest.raises(TooManyMutationsError):
+ _compile_mutation_entries("table", rows)
-class Test__compile_mutation_entries(unittest.TestCase):
- def _call_fut(self, table_name, rows):
- from google.cloud.bigtable.table import _compile_mutation_entries
- return _compile_mutation_entries(table_name, rows)
+def test__compile_mutation_entries_normal():
+ from google.cloud.bigtable.row import DirectRow
+ from google.cloud.bigtable.table import _compile_mutation_entries
+ from google.cloud.bigtable_v2.types import MutateRowsRequest
+ from google.cloud.bigtable_v2.types import data
- @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3)
- def test_w_too_many_mutations(self):
- from google.cloud.bigtable.row import DirectRow
- from google.cloud.bigtable.table import TooManyMutationsError
+ table = mock.Mock(spec=["name"])
+ table.name = "table"
+ rows = [
+ DirectRow(row_key=b"row_key", table=table),
+ DirectRow(row_key=b"row_key_2"),
+ ]
+ rows[0].set_cell("cf1", b"c1", b"1")
+ rows[1].set_cell("cf1", b"c1", b"2")
+
+ result = _compile_mutation_entries("table", rows)
+
+ entry_1 = MutateRowsRequest.Entry()
+ entry_1.row_key = b"row_key"
+ mutations_1 = data.Mutation()
+ mutations_1.set_cell.family_name = "cf1"
+ mutations_1.set_cell.column_qualifier = b"c1"
+ mutations_1.set_cell.timestamp_micros = -1
+ mutations_1.set_cell.value = b"1"
+ entry_1.mutations.append(mutations_1)
+
+ entry_2 = MutateRowsRequest.Entry()
+ entry_2.row_key = b"row_key_2"
+ mutations_2 = data.Mutation()
+ mutations_2.set_cell.family_name = "cf1"
+ mutations_2.set_cell.column_qualifier = b"c1"
+ mutations_2.set_cell.timestamp_micros = -1
+ mutations_2.set_cell.value = b"2"
+ entry_2.mutations.append(mutations_2)
+ assert result == [entry_1, entry_2]
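+
+
+# A timestamp_micros of -1 is the sentinel DirectRow.set_cell() records
+# when no timestamp is supplied; Bigtable interprets it as "use the
+# current server time".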
+
+
+def test__check_row_table_name_w_wrong_table_name():
+ from google.cloud.bigtable.table import _check_row_table_name
+ from google.cloud.bigtable.table import TableMismatchError
+ from google.cloud.bigtable.row import DirectRow
+
+ table = mock.Mock(name="table", spec=["name"])
+ table.name = "table"
+ row = DirectRow(row_key=b"row_key", table=table)
+
+ with pytest.raises(TableMismatchError):
+ _check_row_table_name("other_table", row)
+
+
+def test__check_row_table_name_w_right_table_name():
+ from google.cloud.bigtable.row import DirectRow
+ from google.cloud.bigtable.table import _check_row_table_name
+
+ table = mock.Mock(name="table", spec=["name"])
+ table.name = "table"
+ row = DirectRow(row_key=b"row_key", table=table)
- table = mock.Mock(name="table", spec=["name"])
- table.name = "table"
- rows = [
- DirectRow(row_key=b"row_key", table=table),
- DirectRow(row_key=b"row_key_2", table=table),
- ]
- rows[0].set_cell("cf1", b"c1", 1)
- rows[0].set_cell("cf1", b"c1", 2)
- rows[1].set_cell("cf1", b"c1", 3)
- rows[1].set_cell("cf1", b"c1", 4)
-
- with self.assertRaises(TooManyMutationsError):
- self._call_fut("table", rows)
-
- def test_normal(self):
- from google.cloud.bigtable.row import DirectRow
- from google.cloud.bigtable_v2.types import MutateRowsRequest
- from google.cloud.bigtable_v2.types import data
-
- table = mock.Mock(spec=["name"])
- table.name = "table"
- rows = [
- DirectRow(row_key=b"row_key", table=table),
- DirectRow(row_key=b"row_key_2"),
- ]
- rows[0].set_cell("cf1", b"c1", b"1")
- rows[1].set_cell("cf1", b"c1", b"2")
-
- result = self._call_fut("table", rows)
-
- entry_1 = MutateRowsRequest.Entry()
- entry_1.row_key = b"row_key"
- mutations_1 = data.Mutation()
- mutations_1.set_cell.family_name = "cf1"
- mutations_1.set_cell.column_qualifier = b"c1"
- mutations_1.set_cell.timestamp_micros = -1
- mutations_1.set_cell.value = b"1"
- entry_1.mutations.append(mutations_1)
+ assert not _check_row_table_name("table", row)
- entry_2 = MutateRowsRequest.Entry()
- entry_2.row_key = b"row_key_2"
- mutations_2 = data.Mutation()
- mutations_2.set_cell.family_name = "cf1"
- mutations_2.set_cell.column_qualifier = b"c1"
- mutations_2.set_cell.timestamp_micros = -1
- mutations_2.set_cell.value = b"2"
- entry_2.mutations.append(mutations_2)
- self.assertEqual(result, [entry_1, entry_2])
-
-
-class Test__check_row_table_name(unittest.TestCase):
- def _call_fut(self, table_name, row):
- from google.cloud.bigtable.table import _check_row_table_name
-
- return _check_row_table_name(table_name, row)
-
- def test_wrong_table_name(self):
- from google.cloud.bigtable.table import TableMismatchError
- from google.cloud.bigtable.row import DirectRow
-
- table = mock.Mock(name="table", spec=["name"])
- table.name = "table"
- row = DirectRow(row_key=b"row_key", table=table)
- with self.assertRaises(TableMismatchError):
- self._call_fut("other_table", row)
-
- def test_right_table_name(self):
- from google.cloud.bigtable.row import DirectRow
-
- table = mock.Mock(name="table", spec=["name"])
- table.name = "table"
- row = DirectRow(row_key=b"row_key", table=table)
- result = self._call_fut("table", row)
- self.assertFalse(result)
-
-
-class Test__check_row_type(unittest.TestCase):
- def _call_fut(self, row):
- from google.cloud.bigtable.table import _check_row_type
-
- return _check_row_type(row)
-
- def test_test_wrong_row_type(self):
- from google.cloud.bigtable.row import ConditionalRow
-
- row = ConditionalRow(row_key=b"row_key", table="table", filter_=None)
- with self.assertRaises(TypeError):
- self._call_fut(row)
-
- def test_right_row_type(self):
- from google.cloud.bigtable.row import DirectRow
-
- row = DirectRow(row_key=b"row_key", table="table")
- result = self._call_fut(row)
- self.assertFalse(result)
-
-
-class TestTable(unittest.TestCase):
-
- PROJECT_ID = "project-id"
- INSTANCE_ID = "instance-id"
- INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
- CLUSTER_ID = "cluster-id"
- CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID
- TABLE_ID = "table-id"
- TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
- BACKUP_ID = "backup-id"
- BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID
- ROW_KEY = b"row-key"
- ROW_KEY_1 = b"row-key-1"
- ROW_KEY_2 = b"row-key-2"
- ROW_KEY_3 = b"row-key-3"
- FAMILY_NAME = "family"
- QUALIFIER = b"qualifier"
- TIMESTAMP_MICROS = 100
- VALUE = b"value"
- _json_tests = None
-
- @staticmethod
- def _get_target_class():
- from google.cloud.bigtable.table import Table
-
- return Table
-
- def _make_one(self, *args, **kwargs):
- return self._get_target_class()(*args, **kwargs)
-
- @staticmethod
- def _get_target_client_class():
- from google.cloud.bigtable.client import Client
-
- return Client
-
- def _make_client(self, *args, **kwargs):
- return self._get_target_client_class()(*args, **kwargs)
-
- def test_constructor_defaults(self):
- instance = mock.Mock(spec=[])
- table = self._make_one(self.TABLE_ID, instance)
-
- self.assertEqual(table.table_id, self.TABLE_ID)
- self.assertIs(table._instance, instance)
- self.assertIsNone(table.mutation_timeout)
- self.assertIsNone(table._app_profile_id)
+def test__check_row_type_w_wrong_row_type():
+ from google.cloud.bigtable.row import ConditionalRow
+ from google.cloud.bigtable.table import _check_row_type
- def test_constructor_explicit(self):
- instance = mock.Mock(spec=[])
- mutation_timeout = 123
- app_profile_id = "profile-123"
+ row = ConditionalRow(row_key=b"row_key", table="table", filter_=None)
+ with pytest.raises(TypeError):
+ _check_row_type(row)
- table = self._make_one(
- self.TABLE_ID,
- instance,
- mutation_timeout=mutation_timeout,
- app_profile_id=app_profile_id,
- )
- self.assertEqual(table.table_id, self.TABLE_ID)
- self.assertIs(table._instance, instance)
- self.assertEqual(table.mutation_timeout, mutation_timeout)
- self.assertEqual(table._app_profile_id, app_profile_id)
-
- def test_name(self):
- table_data_client = mock.Mock(spec=["table_path"])
- client = mock.Mock(
- project=self.PROJECT_ID,
- table_data_client=table_data_client,
- spec=["project", "table_data_client"],
- )
- instance = mock.Mock(
- _client=client,
- instance_id=self.INSTANCE_ID,
- spec=["_client", "instance_id"],
- )
+def test__check_row_type_w_right_row_type():
+ from google.cloud.bigtable.row import DirectRow
+ from google.cloud.bigtable.table import _check_row_type
- table = self._make_one(self.TABLE_ID, instance)
+ row = DirectRow(row_key=b"row_key", table="table")
+ assert not _check_row_type(row)
- self.assertEqual(table.name, table_data_client.table_path.return_value)
- def _row_methods_helper(self):
- client = self._make_client(
- project="project-id", credentials=_make_credentials(), admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
- row_key = b"row_key"
- return table, row_key
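+# Module-level factories shared by the ``Table`` tests below.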
+def _make_client(*args, **kwargs):
+ from google.cloud.bigtable.client import Client
- def test_row_factory_direct(self):
- from google.cloud.bigtable.row import DirectRow
+ return Client(*args, **kwargs)
- table, row_key = self._row_methods_helper()
- with warnings.catch_warnings(record=True) as warned:
- row = table.row(row_key)
- self.assertIsInstance(row, DirectRow)
- self.assertEqual(row._row_key, row_key)
- self.assertEqual(row._table, table)
+def _make_table(*args, **kwargs):
+ from google.cloud.bigtable.table import Table
- self.assertEqual(len(warned), 1)
- self.assertIs(warned[0].category, PendingDeprecationWarning)
+ return Table(*args, **kwargs)
- def test_row_factory_conditional(self):
- from google.cloud.bigtable.row import ConditionalRow
- table, row_key = self._row_methods_helper()
- filter_ = object()
+def test_table_constructor_defaults():
+ instance = mock.Mock(spec=[])
- with warnings.catch_warnings(record=True) as warned:
- row = table.row(row_key, filter_=filter_)
+ table = _make_table(TABLE_ID, instance)
+
+ assert table.table_id == TABLE_ID
+ assert table._instance is instance
+ assert table.mutation_timeout is None
+ assert table._app_profile_id is None
+
+
+def test_table_constructor_explicit():
+ instance = mock.Mock(spec=[])
+ mutation_timeout = 123
+ app_profile_id = "profile-123"
+
+ table = _make_table(
+ TABLE_ID,
+ instance,
+ mutation_timeout=mutation_timeout,
+ app_profile_id=app_profile_id,
+ )
+
+ assert table.table_id == TABLE_ID
+ assert table._instance is instance
+ assert table.mutation_timeout == mutation_timeout
+ assert table._app_profile_id == app_profile_id
+
+
+def test_table_name():
+ table_data_client = mock.Mock(spec=["table_path"])
+ client = mock.Mock(
+ project=PROJECT_ID,
+ table_data_client=table_data_client,
+ spec=["project", "table_data_client"],
+ )
+ instance = mock.Mock(
+ _client=client, instance_id=INSTANCE_ID, spec=["_client", "instance_id"],
+ )
+
+ table = _make_table(TABLE_ID, instance)
+
+ assert table.name == table_data_client.table_path.return_value
- self.assertIsInstance(row, ConditionalRow)
- self.assertEqual(row._row_key, row_key)
- self.assertEqual(row._table, table)
- self.assertEqual(len(warned), 1)
- self.assertIs(warned[0].category, PendingDeprecationWarning)
+def _table_row_methods_helper():
+ client = _make_client(
+ project="project-id", credentials=_make_credentials(), admin=True
+ )
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+ row_key = b"row_key"
+ return table, row_key
+
+
+def test_table_row_factory_direct():
+ from google.cloud.bigtable.row import DirectRow
+
+ table, row_key = _table_row_methods_helper()
+ with warnings.catch_warnings(record=True) as warned:
+ row = table.row(row_key)
+
+ assert isinstance(row, DirectRow)
+ assert row._row_key == row_key
+ assert row._table == table
+
+ assert len(warned) == 1
+ assert warned[0].category is PendingDeprecationWarning
+
+
+def test_table_row_factory_conditional():
+ from google.cloud.bigtable.row import ConditionalRow
+
+ table, row_key = _table_row_methods_helper()
+ filter_ = object()
+
+ with warnings.catch_warnings(record=True) as warned:
+ row = table.row(row_key, filter_=filter_)
+
+ assert isinstance(row, ConditionalRow)
+ assert row._row_key == row_key
+ assert row._table == table
- def test_row_factory_append(self):
- from google.cloud.bigtable.row import AppendRow
+ assert len(warned) == 1
+ assert warned[0].category is PendingDeprecationWarning
- table, row_key = self._row_methods_helper()
+def test_table_row_factory_append():
+ from google.cloud.bigtable.row import AppendRow
+
+ table, row_key = _table_row_methods_helper()
+
+ with warnings.catch_warnings(record=True) as warned:
+ row = table.row(row_key, append=True)
+
+ assert isinstance(row, AppendRow)
+ assert row._row_key == row_key
+ assert row._table == table
+
+ assert len(warned) == 1
+ assert warned[0].category is PendingDeprecationWarning
+
+
+def test_table_row_factory_failure():
+ table, row_key = _table_row_methods_helper()
+
+ with pytest.raises(ValueError):
with warnings.catch_warnings(record=True) as warned:
- row = table.row(row_key, append=True)
+ table.row(row_key, filter_=object(), append=True)
- self.assertIsInstance(row, AppendRow)
- self.assertEqual(row._row_key, row_key)
- self.assertEqual(row._table, table)
+ assert len(warned) == 1
+ assert warned[0].category is PendingDeprecationWarning
- self.assertEqual(len(warned), 1)
- self.assertIs(warned[0].category, PendingDeprecationWarning)
- def test_row_factory_failure(self):
- table, row_key = self._row_methods_helper()
- with self.assertRaises(ValueError):
- with warnings.catch_warnings(record=True) as warned:
- table.row(row_key, filter_=object(), append=True)
+def test_table_direct_row():
+ from google.cloud.bigtable.row import DirectRow
- self.assertEqual(len(warned), 1)
- self.assertIs(warned[0].category, PendingDeprecationWarning)
+ table, row_key = _table_row_methods_helper()
+ row = table.direct_row(row_key)
- def test_direct_row(self):
- from google.cloud.bigtable.row import DirectRow
+ assert isinstance(row, DirectRow)
+ assert row._row_key == row_key
+ assert row._table == table
- table, row_key = self._row_methods_helper()
- row = table.direct_row(row_key)
- self.assertIsInstance(row, DirectRow)
- self.assertEqual(row._row_key, row_key)
- self.assertEqual(row._table, table)
+def test_table_conditional_row():
+ from google.cloud.bigtable.row import ConditionalRow
- def test_conditional_row(self):
- from google.cloud.bigtable.row import ConditionalRow
+ table, row_key = _table_row_methods_helper()
+ filter_ = object()
+ row = table.conditional_row(row_key, filter_=filter_)
- table, row_key = self._row_methods_helper()
- filter_ = object()
- row = table.conditional_row(row_key, filter_=filter_)
+ assert isinstance(row, ConditionalRow)
+ assert row._row_key == row_key
+ assert row._table == table
- self.assertIsInstance(row, ConditionalRow)
- self.assertEqual(row._row_key, row_key)
- self.assertEqual(row._table, table)
- def test_append_row(self):
- from google.cloud.bigtable.row import AppendRow
+def test_table_append_row():
+ from google.cloud.bigtable.row import AppendRow
- table, row_key = self._row_methods_helper()
- row = table.append_row(row_key)
+ table, row_key = _table_row_methods_helper()
+ row = table.append_row(row_key)
- self.assertIsInstance(row, AppendRow)
- self.assertEqual(row._row_key, row_key)
- self.assertEqual(row._table, table)
+ assert isinstance(row, AppendRow)
+ assert row._row_key == row_key
+ assert row._table == table
- def test___eq__(self):
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table1 = self._make_one(self.TABLE_ID, instance)
- table2 = self._make_one(self.TABLE_ID, instance)
- self.assertEqual(table1, table2)
-
- def test___eq__type_differ(self):
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table1 = self._make_one(self.TABLE_ID, instance)
- table2 = object()
- self.assertNotEqual(table1, table2)
-
- def test___ne__same_value(self):
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table1 = self._make_one(self.TABLE_ID, instance)
- table2 = self._make_one(self.TABLE_ID, instance)
- comparison_val = table1 != table2
- self.assertFalse(comparison_val)
-
- def test___ne__(self):
- table1 = self._make_one("table_id1", None)
- table2 = self._make_one("table_id2", None)
- self.assertNotEqual(table1, table2)
-
- def _create_test_helper(self, split_keys=[], column_families={}):
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- from google.cloud.bigtable_admin_v2.types import table as table_pb2
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_table_admin as table_admin_messages_v2_pb2,
- )
- from google.cloud.bigtable.column_family import ColumnFamily
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
+def test_table___eq__():
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table1 = _make_table(TABLE_ID, instance)
+ table2 = _make_table(TABLE_ID, instance)
+ assert table1 == table2
+
+
+def test_table___eq__type_differ():
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table1 = _make_table(TABLE_ID, instance)
+ table2 = object()
+ assert not (table1 == table2)
+
+
+def test_table___ne__same_value():
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table1 = _make_table(TABLE_ID, instance)
+ table2 = _make_table(TABLE_ID, instance)
+ assert not (table1 != table2)
+
+
+def test_table___ne__():
+ table1 = _make_table("table_id1", None)
+ table2 = _make_table("table_id2", None)
+ assert table1 != table2
+
+
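+# Helper: an autospec'd BigtableTableAdminClient, used by the tests below to
+# patch ``client._table_admin_client``.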
+def _make_table_api():
+ from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+ client as bigtable_table_admin,
+ )
- # Patch API calls
- client._table_admin_client = table_api
+ return mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- # Perform the method and check the result.
- table.create(column_families=column_families, initial_split_keys=split_keys)
- families = {
- id: ColumnFamily(id, self, rule).to_pb()
- for (id, rule) in column_families.items()
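+# Exercises ``Table.create`` and verifies the resulting CreateTableRequest.
+# The mutable default arguments are never mutated, so they are safe here.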
+def _create_table_helper(split_keys=[], column_families={}):
+ from google.cloud.bigtable_admin_v2.types import table as table_pb2
+ from google.cloud.bigtable_admin_v2.types import (
+ bigtable_table_admin as table_admin_messages_v2_pb2,
+ )
+ from google.cloud.bigtable.column_family import ColumnFamily
+
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+
+ table_api = client._table_admin_client = _make_table_api()
+
+ table.create(column_families=column_families, initial_split_keys=split_keys)
+
+ families = {
+ id: ColumnFamily(id, table, rule).to_pb()
+ for (id, rule) in column_families.items()
+ }
+
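+    # Each initial split key becomes a ``CreateTableRequest.Split`` message.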
+ split = table_admin_messages_v2_pb2.CreateTableRequest.Split
+ splits = [split(key=split_key) for split_key in split_keys]
+
+ table_api.create_table.assert_called_once_with(
+ request={
+ "parent": INSTANCE_NAME,
+ "table": table_pb2.Table(column_families=families),
+ "table_id": TABLE_ID,
+ "initial_splits": splits,
}
+ )
- split = table_admin_messages_v2_pb2.CreateTableRequest.Split
- splits = [split(key=split_key) for split_key in split_keys]
- table_api.create_table.assert_called_once_with(
- request={
- "parent": self.INSTANCE_NAME,
- "table": table_pb2.Table(column_families=families),
- "table_id": self.TABLE_ID,
- "initial_splits": splits,
- }
- )
+def test_table_create():
+ _create_table_helper()
- def test_create(self):
- self._create_test_helper()
- def test_create_with_families(self):
- from google.cloud.bigtable.column_family import MaxVersionsGCRule
+def test_table_create_with_families():
+ from google.cloud.bigtable.column_family import MaxVersionsGCRule
- families = {"family": MaxVersionsGCRule(5)}
- self._create_test_helper(column_families=families)
+ families = {"family": MaxVersionsGCRule(5)}
+ _create_table_helper(column_families=families)
- def test_create_with_split_keys(self):
- self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"])
- def test_exists(self):
- from google.cloud.bigtable_admin_v2.types import ListTablesResponse
- from google.cloud.bigtable_admin_v2.types import Table
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as table_admin_client,
- )
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- client as instance_admin_client,
- )
- from google.api_core.exceptions import NotFound
- from google.api_core.exceptions import BadRequest
+def test_table_create_with_split_keys():
+ _create_table_helper(split_keys=[b"split1", b"split2", b"split3"])
- table_api = mock.create_autospec(table_admin_client.BigtableTableAdminClient)
- instance_api = mock.create_autospec(
- instance_admin_client.BigtableInstanceAdminClient
- )
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- # Create response_pb
- response_pb = ListTablesResponse(tables=[Table(name=self.TABLE_NAME)])
-
- # Patch API calls
- client._table_admin_client = table_api
- client._instance_admin_client = instance_api
- bigtable_table_stub = client._table_admin_client
-
- bigtable_table_stub.get_table.side_effect = [
- response_pb,
- NotFound("testing"),
- BadRequest("testing"),
- ]
+def test_table_exists_hit():
+ from google.cloud.bigtable_admin_v2.types import ListTablesResponse
+ from google.cloud.bigtable_admin_v2.types import Table
+ from google.cloud.bigtable import enums
- client._table_admin_client = table_api
- client._instance_admin_client = instance_api
- bigtable_table_stub = client._table_admin_client
- bigtable_table_stub.get_table.side_effect = [
- response_pb,
- NotFound("testing"),
- BadRequest("testing"),
- ]
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = instance.table(TABLE_ID)
- # Perform the method and check the result.
- table1 = instance.table(self.TABLE_ID)
- table2 = instance.table("table-id2")
+ response_pb = ListTablesResponse(tables=[Table(name=TABLE_NAME)])
+ table_api = client._table_admin_client = _make_table_api()
+ table_api.get_table.return_value = response_pb
- result = table1.exists()
- self.assertEqual(True, result)
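+    # A successful ``get_table`` round trip reports the table as existing.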
+ assert table.exists()
- result = table2.exists()
- self.assertEqual(False, result)
+ expected_request = {
+ "name": table.name,
+ "view": enums.Table.View.NAME_ONLY,
+ }
+ table_api.get_table.assert_called_once_with(request=expected_request)
- with self.assertRaises(BadRequest):
- table2.exists()
- def test_delete(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
+def test_table_exists_miss():
+ from google.api_core.exceptions import NotFound
+ from google.cloud.bigtable import enums
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = instance.table("nonesuch-table-id2")
- # Patch API calls
- client._table_admin_client = table_api
+ table_api = client._table_admin_client = _make_table_api()
+ table_api.get_table.side_effect = NotFound("testing")
- # Create expected_result.
- expected_result = None # delete() has no return value.
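+    # ``NotFound`` from ``get_table`` maps to a clean ``False``.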
+ assert not table.exists()
- # Perform the method and check the result.
- result = table.delete()
- self.assertEqual(result, expected_result)
+ expected_request = {
+ "name": table.name,
+ "view": enums.Table.View.NAME_ONLY,
+ }
+ table_api.get_table.assert_called_once_with(request=expected_request)
- def _list_column_families_helper(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
+def test_table_exists_error():
+ from google.api_core.exceptions import BadRequest
+ from google.cloud.bigtable import enums
- # Create response_pb
- COLUMN_FAMILY_ID = "foo"
- column_family = _ColumnFamilyPB()
- response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family})
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
- # Patch the stub used by the API method.
- client._table_admin_client = table_api
- bigtable_table_stub = client._table_admin_client
- bigtable_table_stub.get_table.side_effect = [response_pb]
+ table_api = client._table_admin_client = _make_table_api()
+ table_api.get_table.side_effect = BadRequest("testing")
- # Create expected_result.
- expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)}
+ table = instance.table(TABLE_ID)
- # Perform the method and check the result.
- result = table.list_column_families()
- self.assertEqual(result, expected_result)
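+    # Errors other than ``NotFound`` propagate to the caller.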
+ with pytest.raises(BadRequest):
+ table.exists()
- def test_list_column_families(self):
- self._list_column_families_helper()
+ expected_request = {
+ "name": table.name,
+ "view": enums.Table.View.NAME_ONLY,
+ }
+ table_api.get_table.assert_called_once_with(request=expected_request)
- def test_get_cluster_states(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- from google.cloud.bigtable.enums import Table as enum_table
- from google.cloud.bigtable.table import ClusterState
- INITIALIZING = enum_table.ReplicationState.INITIALIZING
- PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
- READY = enum_table.ReplicationState.READY
+def test_table_delete():
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
-
- response_pb = _TablePB(
- cluster_states={
- "cluster-id1": _ClusterStatePB(INITIALIZING),
- "cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE),
- "cluster-id3": _ClusterStatePB(READY),
- }
- )
+ table_api = client._table_admin_client = _make_table_api()
- # Patch the stub used by the API method.
- client._table_admin_client = table_api
- bigtable_table_stub = client._table_admin_client
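+    # ``delete`` has no return value; verify the RPC it issued instead.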
+ assert table.delete() is None
- bigtable_table_stub.get_table.side_effect = [response_pb]
+ table_api.delete_table.assert_called_once_with(request={"name": table.name})
- # build expected result
- expected_result = {
- "cluster-id1": ClusterState(INITIALIZING),
- "cluster-id2": ClusterState(PLANNED_MAINTENANCE),
- "cluster-id3": ClusterState(READY),
- }
- # Perform the method and check the result.
- result = table.get_cluster_states()
- self.assertEqual(result, expected_result)
+def _table_list_column_families_helper():
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
- def test_get_encryption_info(self):
- from google.rpc.code_pb2 import Code
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- from google.cloud.bigtable.encryption_info import EncryptionInfo
- from google.cloud.bigtable.enums import EncryptionInfo as enum_crypto
- from google.cloud.bigtable.error import Status
+ # Create response_pb
+ COLUMN_FAMILY_ID = "foo"
+ column_family = _ColumnFamilyPB()
+ response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family})
- ENCRYPTION_TYPE_UNSPECIFIED = (
- enum_crypto.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED
- )
- GOOGLE_DEFAULT_ENCRYPTION = enum_crypto.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
- CUSTOMER_MANAGED_ENCRYPTION = (
- enum_crypto.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION
- )
+ # Patch the stub used by the API method.
+ table_api = client._table_admin_client = _make_table_api()
+ table_api.get_table.return_value = response_pb
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
-
- response_pb = _TablePB(
- cluster_states={
- "cluster-id1": _ClusterStateEncryptionInfoPB(
- encryption_type=ENCRYPTION_TYPE_UNSPECIFIED,
- encryption_status=_StatusPB(Code.OK, "Status OK"),
- ),
- "cluster-id2": _ClusterStateEncryptionInfoPB(
- encryption_type=GOOGLE_DEFAULT_ENCRYPTION,
- ),
- "cluster-id3": _ClusterStateEncryptionInfoPB(
- encryption_type=CUSTOMER_MANAGED_ENCRYPTION,
- encryption_status=_StatusPB(
- Code.UNKNOWN, "Key version is not yet known."
- ),
- kms_key_version="UNKNOWN",
- ),
- }
- )
+ # Create expected_result.
+ expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)}
- # Patch the stub used by the API method.
- client._table_admin_client = table_api
- bigtable_table_stub = client._table_admin_client
+ # Perform the method and check the result.
+ result = table.list_column_families()
- bigtable_table_stub.get_table.side_effect = [response_pb]
+ assert result == expected_result
- # build expected result
- expected_result = {
- "cluster-id1": (
- EncryptionInfo(
- encryption_type=ENCRYPTION_TYPE_UNSPECIFIED,
- encryption_status=Status(_StatusPB(Code.OK, "Status OK")),
- kms_key_version="",
- ),
+ table_api.get_table.assert_called_once_with(request={"name": table.name})
+
+
+def test_table_list_column_families():
+ _table_list_column_families_helper()
+
+
+def test_table_get_cluster_states():
+ from google.cloud.bigtable.enums import Table as enum_table
+ from google.cloud.bigtable.table import ClusterState
+
+ INITIALIZING = enum_table.ReplicationState.INITIALIZING
+ PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
+ READY = enum_table.ReplicationState.READY
+
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+
+ response_pb = _TablePB(
+ cluster_states={
+ "cluster-id1": _ClusterStatePB(INITIALIZING),
+ "cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE),
+ "cluster-id3": _ClusterStatePB(READY),
+ }
+ )
+
+ # Patch the stub used by the API method.
+ table_api = client._table_admin_client = _make_table_api()
+ table_api.get_table.return_value = response_pb
+
+ # build expected result
+ expected_result = {
+ "cluster-id1": ClusterState(INITIALIZING),
+ "cluster-id2": ClusterState(PLANNED_MAINTENANCE),
+ "cluster-id3": ClusterState(READY),
+ }
+
+ # Perform the method and check the result.
+ result = table.get_cluster_states()
+
+ assert result == expected_result
+
+ expected_request = {
+ "name": table.name,
+ "view": enum_table.View.REPLICATION_VIEW,
+ }
+ table_api.get_table.assert_called_once_with(request=expected_request)
+
+
+def test_table_get_encryption_info():
+ from google.rpc.code_pb2 import Code
+ from google.cloud.bigtable.encryption_info import EncryptionInfo
+ from google.cloud.bigtable.enums import EncryptionInfo as enum_crypto
+ from google.cloud.bigtable.enums import Table as enum_table
+ from google.cloud.bigtable.error import Status
+
+ ENCRYPTION_TYPE_UNSPECIFIED = enum_crypto.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED
+ GOOGLE_DEFAULT_ENCRYPTION = enum_crypto.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
+ CUSTOMER_MANAGED_ENCRYPTION = enum_crypto.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION
+
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+
+ response_pb = _TablePB(
+ cluster_states={
+ "cluster-id1": _ClusterStateEncryptionInfoPB(
+ encryption_type=ENCRYPTION_TYPE_UNSPECIFIED,
+ encryption_status=_StatusPB(Code.OK, "Status OK"),
),
- "cluster-id2": (
- EncryptionInfo(
- encryption_type=GOOGLE_DEFAULT_ENCRYPTION,
- encryption_status=Status(_StatusPB(0, "")),
- kms_key_version="",
- ),
+ "cluster-id2": _ClusterStateEncryptionInfoPB(
+ encryption_type=GOOGLE_DEFAULT_ENCRYPTION,
),
- "cluster-id3": (
- EncryptionInfo(
- encryption_type=CUSTOMER_MANAGED_ENCRYPTION,
- encryption_status=Status(
- _StatusPB(Code.UNKNOWN, "Key version is not yet known.")
- ),
- kms_key_version="UNKNOWN",
+ "cluster-id3": _ClusterStateEncryptionInfoPB(
+ encryption_type=CUSTOMER_MANAGED_ENCRYPTION,
+ encryption_status=_StatusPB(
+ Code.UNKNOWN, "Key version is not yet known."
),
+ kms_key_version="UNKNOWN",
),
}
+ )
- # Perform the method and check the result.
- result = table.get_encryption_info()
- self.assertEqual(result, expected_result)
+ # Patch the stub used by the API method.
+ table_api = client._table_admin_client = _make_table_api()
+ table_api.get_table.return_value = response_pb
+
+ # build expected result
+ expected_result = {
+ "cluster-id1": (
+ EncryptionInfo(
+ encryption_type=ENCRYPTION_TYPE_UNSPECIFIED,
+ encryption_status=Status(_StatusPB(Code.OK, "Status OK")),
+ kms_key_version="",
+ ),
+ ),
+ "cluster-id2": (
+ EncryptionInfo(
+ encryption_type=GOOGLE_DEFAULT_ENCRYPTION,
+ encryption_status=Status(_StatusPB(0, "")),
+ kms_key_version="",
+ ),
+ ),
+ "cluster-id3": (
+ EncryptionInfo(
+ encryption_type=CUSTOMER_MANAGED_ENCRYPTION,
+ encryption_status=Status(
+ _StatusPB(Code.UNKNOWN, "Key version is not yet known.")
+ ),
+ kms_key_version="UNKNOWN",
+ ),
+ ),
+ }
- def _read_row_helper(self, chunks, expected_result, app_profile_id=None):
+ # Perform the method and check the result.
+ result = table.get_encryption_info()
- from google.cloud._testing import _Monkey
- from google.cloud.bigtable import table as MUT
- from google.cloud.bigtable.row_set import RowSet
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- from google.cloud.bigtable.row_filters import RowSampleFilter
+ assert result == expected_result
+ expected_request = {
+ "name": table.name,
+ "view": enum_table.View.ENCRYPTION_VIEW,
+ }
+ table_api.get_table.assert_called_once_with(request=expected_request)
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)
- # Create request_pb
- request_pb = object() # Returned by our mock.
- mock_created = []
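+# Helper: an autospec'd BigtableClient, used by the tests below to patch
+# ``client._table_data_client``.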
+def _make_data_api():
+ from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- def mock_create_row_request(table_name, **kwargs):
- mock_created.append((table_name, kwargs))
- return request_pb
+ return mock.create_autospec(BigtableClient)
- # Create response_iterator
- if chunks is None:
- response_iterator = iter(()) # no responses at all
- else:
- response_pb = _ReadRowsResponsePB(chunks=chunks)
- response_iterator = iter([response_pb])
-
- # Patch the stub used by the API method.
- client._table_data_client = data_api
- client._table_admin_client = table_api
- client._table_data_client.read_rows.side_effect = [response_iterator]
- table._instance._client._table_data_client = client._table_data_client
- # Perform the method and check the result.
- filter_obj = RowSampleFilter(0.33)
- result = None
- with _Monkey(MUT, _create_row_request=mock_create_row_request):
- result = table.read_row(self.ROW_KEY, filter_=filter_obj)
- row_set = RowSet()
- row_set.add_row_key(self.ROW_KEY)
- expected_request = [
- (
- table.name,
- {
- "end_inclusive": False,
- "row_set": row_set,
- "app_profile_id": app_profile_id,
- "end_key": None,
- "limit": None,
- "start_key": None,
- "filter_": filter_obj,
- },
- )
- ]
- self.assertEqual(result, expected_result)
- self.assertEqual(mock_created, expected_request)
-
- def test_read_row_miss_no__responses(self):
- self._read_row_helper(None, None)
-
- def test_read_row_miss_no_chunks_in_response(self):
- chunks = []
- self._read_row_helper(chunks, None)
-
- def test_read_row_complete(self):
- from google.cloud.bigtable.row_data import Cell
- from google.cloud.bigtable.row_data import PartialRowData
-
- app_profile_id = "app-profile-id"
- chunk = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
- chunks = [chunk]
- expected_result = PartialRowData(row_key=self.ROW_KEY)
- family = expected_result._cells.setdefault(self.FAMILY_NAME, {})
- column = family.setdefault(self.QUALIFIER, [])
- column.append(Cell.from_pb(chunk))
- self._read_row_helper(chunks, expected_result, app_profile_id)
-
- def test_read_row_more_than_one_row_returned(self):
- app_profile_id = "app-profile-id"
- chunk_1 = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )._pb
- chunk_2 = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY_2,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )._pb
-
- chunks = [chunk_1, chunk_2]
- with self.assertRaises(ValueError):
- self._read_row_helper(chunks, None, app_profile_id)
-
- def test_read_row_still_partial(self):
- chunk = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- )
- # No "commit row".
- chunks = [chunk]
- with self.assertRaises(ValueError):
- self._read_row_helper(chunks, None)
-
- def _mutate_rows_helper(
- self, mutation_timeout=None, app_profile_id=None, retry=None, timeout=None
- ):
- from google.rpc.status_pb2 import Status
- from google.cloud.bigtable.table import DEFAULT_RETRY
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- client._table_admin_client = table_api
- ctor_kwargs = {}
+def _table_read_row_helper(chunks, expected_result, app_profile_id=None):
+ from google.cloud._testing import _Monkey
+ from google.cloud.bigtable import table as MUT
+ from google.cloud.bigtable.row_set import RowSet
+ from google.cloud.bigtable.row_filters import RowSampleFilter
+
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance, app_profile_id=app_profile_id)
+
+ # Create request_pb
+ request_pb = object() # Returned by our mock.
+ mock_created = []
+
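+    # Capture the kwargs forwarded to ``_create_row_request`` so the test
+    # can verify how ``read_row`` built its request.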
+ def mock_create_row_request(table_name, **kwargs):
+ mock_created.append((table_name, kwargs))
+ return request_pb
- if mutation_timeout is not None:
- ctor_kwargs["mutation_timeout"] = mutation_timeout
+ # Create response_iterator
+ if chunks is None:
+ response_iterator = iter(()) # no responses at all
+ else:
+ response_pb = _ReadRowsResponsePB(chunks=chunks)
+ response_iterator = iter([response_pb])
- if app_profile_id is not None:
- ctor_kwargs["app_profile_id"] = app_profile_id
+ data_api = client._table_data_client = _make_data_api()
+ data_api.read_rows.return_value = response_iterator
- table = self._make_one(self.TABLE_ID, instance, **ctor_kwargs)
+ filter_obj = RowSampleFilter(0.33)
- rows = [mock.MagicMock(), mock.MagicMock()]
- response = [Status(code=0), Status(code=1)]
- instance_mock = mock.Mock(return_value=response)
- klass_mock = mock.patch(
- "google.cloud.bigtable.table._RetryableMutateRowsWorker",
- new=mock.MagicMock(return_value=instance_mock),
+ with _Monkey(MUT, _create_row_request=mock_create_row_request):
+ result = table.read_row(ROW_KEY, filter_=filter_obj)
+
+ row_set = RowSet()
+ row_set.add_row_key(ROW_KEY)
+ expected_request = [
+ (
+ table.name,
+ {
+ "end_inclusive": False,
+ "row_set": row_set,
+ "app_profile_id": app_profile_id,
+ "end_key": None,
+ "limit": None,
+ "start_key": None,
+ "filter_": filter_obj,
+ },
)
+ ]
+ assert result == expected_result
+ assert mock_created == expected_request
- call_kwargs = {}
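+    # timeout is the default read-rows retry deadline (60s) plus one second.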
+ data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0)
- if retry is not None:
- call_kwargs["retry"] = retry
- if timeout is not None:
- expected_timeout = call_kwargs["timeout"] = timeout
- else:
- expected_timeout = mutation_timeout
+def test_table_read_row_miss_no_responses():
+ _table_read_row_helper(None, None)
- with klass_mock:
- statuses = table.mutate_rows(rows, **call_kwargs)
- result = [status.code for status in statuses]
- expected_result = [0, 1]
- self.assertEqual(result, expected_result)
-
- klass_mock.new.assert_called_once_with(
- client,
- self.TABLE_NAME,
- rows,
- app_profile_id=app_profile_id,
- timeout=expected_timeout,
- )
+def test_table_read_row_miss_no_chunks_in_response():
+ chunks = []
+ _table_read_row_helper(chunks, None)
- if retry is not None:
- instance_mock.assert_called_once_with(retry=retry)
- else:
- instance_mock.assert_called_once_with(retry=DEFAULT_RETRY)
-
- def test_mutate_rows_w_default_mutation_timeout_app_profile_id(self):
- self._mutate_rows_helper()
-
- def test_mutate_rows_w_mutation_timeout(self):
- mutation_timeout = 123
- self._mutate_rows_helper(mutation_timeout=mutation_timeout)
-
- def test_mutate_rows_w_app_profile_id(self):
- app_profile_id = "profile-123"
- self._mutate_rows_helper(app_profile_id=app_profile_id)
-
- def test_mutate_rows_w_retry(self):
- retry = mock.Mock()
- self._mutate_rows_helper(retry=retry)
-
- def test_mutate_rows_w_timeout_arg(self):
- timeout = 123
- self._mutate_rows_helper(timeout=timeout)
-
- def test_mutate_rows_w_mutation_timeout_and_timeout_arg(self):
- mutation_timeout = 123
- timeout = 456
- self._mutate_rows_helper(mutation_timeout=mutation_timeout, timeout=timeout)
-
- def test_read_rows(self):
- from google.cloud._testing import _Monkey
- from google.cloud.bigtable.row_data import PartialRowsData
- from google.cloud.bigtable import table as MUT
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- app_profile_id = "app-profile-id"
- table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)
-
- # Create request_pb
- request = object() # Returned by our mock.
- retry = DEFAULT_RETRY_READ_ROWS
- mock_created = []
-
- def mock_create_row_request(table_name, **kwargs):
- mock_created.append((table_name, kwargs))
- return request
-
- # Create expected_result.
- expected_result = PartialRowsData(
- client._table_data_client.transport.read_rows, request, retry
- )
+def test_table_read_row_complete():
+ from google.cloud.bigtable.row_data import Cell
+ from google.cloud.bigtable.row_data import PartialRowData
- # Perform the method and check the result.
- start_key = b"start-key"
- end_key = b"end-key"
- filter_obj = object()
- limit = 22
- with _Monkey(MUT, _create_row_request=mock_create_row_request):
- result = table.read_rows(
- start_key=start_key,
- end_key=end_key,
- filter_=filter_obj,
- limit=limit,
- retry=retry,
- )
+ app_profile_id = "app-profile-id"
+ chunk = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
+ chunks = [chunk]
+ expected_result = PartialRowData(row_key=ROW_KEY)
+ family = expected_result._cells.setdefault(FAMILY_NAME, {})
+ column = family.setdefault(QUALIFIER, [])
+ column.append(Cell.from_pb(chunk))
+
+ _table_read_row_helper(chunks, expected_result, app_profile_id)
+
+
+def test_table_read_row_more_than_one_row_returned():
+ app_profile_id = "app-profile-id"
+ chunk_1 = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )._pb
+ chunk_2 = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY_2,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )._pb
+
+ chunks = [chunk_1, chunk_2]
+
+ with pytest.raises(ValueError):
+ _table_read_row_helper(chunks, None, app_profile_id)
+
+
+def test_table_read_row_still_partial():
+ chunk = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ )
+ chunks = [chunk] # No "commit row".
- self.assertEqual(result.rows, expected_result.rows)
- self.assertEqual(result.retry, expected_result.retry)
- created_kwargs = {
- "start_key": start_key,
- "end_key": end_key,
- "filter_": filter_obj,
- "limit": limit,
- "end_inclusive": False,
- "app_profile_id": app_profile_id,
- "row_set": None,
- }
- self.assertEqual(mock_created, [(table.name, created_kwargs)])
+ with pytest.raises(ValueError):
+ _table_read_row_helper(chunks, None)
- def test_read_retry_rows(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- from google.api_core import retry
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
-
- retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception)
-
- # Create response_iterator
- chunk_1 = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY_1,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
+def _table_mutate_rows_helper(
+ mutation_timeout=None, app_profile_id=None, retry=None, timeout=None
+):
+ from google.rpc.status_pb2 import Status
+ from google.cloud.bigtable.table import DEFAULT_RETRY
- chunk_2 = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY_2,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ ctor_kwargs = {}
- response_1 = _ReadRowsResponseV2([chunk_1])
- response_2 = _ReadRowsResponseV2([chunk_2])
- response_failure_iterator_1 = _MockFailureIterator_1()
- response_failure_iterator_2 = _MockFailureIterator_2([response_1])
- response_iterator = _MockReadRowsIterator(response_2)
-
- # Patch the stub used by the API method.
- data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}"
-
- client._table_data_client.read_rows = mock.Mock(
- side_effect=[
- response_failure_iterator_1,
- response_failure_iterator_2,
- response_iterator,
- ]
- )
+ if mutation_timeout is not None:
+ ctor_kwargs["mutation_timeout"] = mutation_timeout
- table._instance._client._table_data_client = data_api
- table._instance._client._table_admin_client = table_api
- rows = []
- for row in table.read_rows(
- start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows
- ):
- rows.append(row)
+ if app_profile_id is not None:
+ ctor_kwargs["app_profile_id"] = app_profile_id
- result = rows[1]
- self.assertEqual(result.row_key, self.ROW_KEY_2)
+ table = _make_table(TABLE_ID, instance, **ctor_kwargs)
- def test_yield_retry_rows(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
+ rows = [mock.MagicMock(), mock.MagicMock()]
+ response = [Status(code=0), Status(code=1)]
+ instance_mock = mock.Mock(return_value=response)
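+    # Patch the worker class so ``mutate_rows`` returns the canned statuses.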
+ klass_mock = mock.patch(
+ "google.cloud.bigtable.table._RetryableMutateRowsWorker",
+ new=mock.MagicMock(return_value=instance_mock),
+ )
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
-
- # Create response_iterator
- chunk_1 = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY_1,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
+ call_kwargs = {}
- chunk_2 = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY_2,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
+ if retry is not None:
+ call_kwargs["retry"] = retry
- response_1 = _ReadRowsResponseV2([chunk_1])
- response_2 = _ReadRowsResponseV2([chunk_2])
- response_failure_iterator_1 = _MockFailureIterator_1()
- response_failure_iterator_2 = _MockFailureIterator_2([response_1])
- response_iterator = _MockReadRowsIterator(response_2)
-
- # Patch the stub used by the API method.
- data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}"
- table_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}"
-
- table._instance._client._table_data_client = data_api
- table._instance._client._table_admin_client = table_api
- client._table_data_client.read_rows.side_effect = [
- response_failure_iterator_1,
- response_failure_iterator_2,
- response_iterator,
- ]
+ if timeout is not None:
+ expected_timeout = call_kwargs["timeout"] = timeout
+ else:
+ expected_timeout = mutation_timeout
- rows = []
- with warnings.catch_warnings(record=True) as warned:
- for row in table.yield_rows(
- start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2
- ):
- rows.append(row)
+ with klass_mock:
+ statuses = table.mutate_rows(rows, **call_kwargs)
- self.assertEqual(len(warned), 1)
- self.assertIs(warned[0].category, DeprecationWarning)
+ result = [status.code for status in statuses]
+ expected_result = [0, 1]
+ assert result == expected_result
- result = rows[1]
- self.assertEqual(result.row_key, self.ROW_KEY_2)
+ klass_mock.new.assert_called_once_with(
+ client,
+ TABLE_NAME,
+ rows,
+ app_profile_id=app_profile_id,
+ timeout=expected_timeout,
+ )
- def test_yield_rows_with_row_set(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- from google.cloud.bigtable.row_set import RowSet
- from google.cloud.bigtable.row_set import RowRange
-
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
-
- # Create response_iterator
- chunk_1 = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY_1,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
+ if retry is not None:
+ instance_mock.assert_called_once_with(retry=retry)
+ else:
+ instance_mock.assert_called_once_with(retry=DEFAULT_RETRY)
- chunk_2 = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY_2,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
- chunk_3 = _ReadRowsResponseCellChunkPB(
- row_key=self.ROW_KEY_3,
- family_name=self.FAMILY_NAME,
- qualifier=self.QUALIFIER,
- timestamp_micros=self.TIMESTAMP_MICROS,
- value=self.VALUE,
- commit_row=True,
- )
+def test_table_mutate_rows_w_default_mutation_timeout_app_profile_id():
+ _table_mutate_rows_helper()
- response_1 = _ReadRowsResponseV2([chunk_1])
- response_2 = _ReadRowsResponseV2([chunk_2])
- response_3 = _ReadRowsResponseV2([chunk_3])
- response_iterator = _MockReadRowsIterator(response_1, response_2, response_3)
- # Patch the stub used by the API method.
- data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}"
- table_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}"
+def test_table_mutate_rows_w_mutation_timeout():
+ mutation_timeout = 123
+ _table_mutate_rows_helper(mutation_timeout=mutation_timeout)
- table._instance._client._table_data_client = data_api
- table._instance._client._table_admin_client = table_api
- client._table_data_client.read_rows.side_effect = [response_iterator]
- rows = []
- row_set = RowSet()
- row_set.add_row_range(
- RowRange(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2)
- )
- row_set.add_row_key(self.ROW_KEY_3)
+def test_table_mutate_rows_w_app_profile_id():
+ app_profile_id = "profile-123"
+ _table_mutate_rows_helper(app_profile_id=app_profile_id)
- with warnings.catch_warnings(record=True) as warned:
- for row in table.yield_rows(row_set=row_set):
- rows.append(row)
- self.assertEqual(len(warned), 1)
- self.assertIs(warned[0].category, DeprecationWarning)
+def test_table_mutate_rows_w_retry():
+ retry = mock.Mock()
+ _table_mutate_rows_helper(retry=retry)
- self.assertEqual(rows[0].row_key, self.ROW_KEY_1)
- self.assertEqual(rows[1].row_key, self.ROW_KEY_2)
- self.assertEqual(rows[2].row_key, self.ROW_KEY_3)
- def test_sample_row_keys(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
+def test_table_mutate_rows_w_timeout_arg():
+ timeout = 123
+ _table_mutate_rows_helper(timeout=timeout)
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
- # Create response_iterator
- response_iterator = object() # Just passed to a mock.
+def test_table_mutate_rows_w_mutation_timeout_and_timeout_arg():
+ mutation_timeout = 123
+ timeout = 456
+ _table_mutate_rows_helper(mutation_timeout=mutation_timeout, timeout=timeout)
- # Patch the stub used by the API method.
- client._table_data_client.sample_row_keys.side_effect = [[response_iterator]]
- # Create expected_result.
- expected_result = response_iterator
+def test_table_read_rows():
+ from google.cloud._testing import _Monkey
+ from google.cloud.bigtable.row_data import PartialRowsData
+ from google.cloud.bigtable import table as MUT
+ from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS
- # Perform the method and check the result.
- result = table.sample_row_keys()
- self.assertEqual(result[0], expected_result)
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ data_api = client._table_data_client = _make_data_api()
+ instance = client.instance(instance_id=INSTANCE_ID)
+ app_profile_id = "app-profile-id"
+ table = _make_table(TABLE_ID, instance, app_profile_id=app_profile_id)
- def test_truncate(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
+ # Create request_pb
+ request_pb = object() # Returned by our mock.
+ retry = DEFAULT_RETRY_READ_ROWS
+ mock_created = []
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
+ def mock_create_row_request(table_name, **kwargs):
+ mock_created.append((table_name, kwargs))
+ return request_pb
- expected_result = None # truncate() has no return value.
- with mock.patch("google.cloud.bigtable.table.Table.name", new=self.TABLE_NAME):
- result = table.truncate()
+ # Create expected_result.
+ expected_result = PartialRowsData(
+ client._table_data_client.transport.read_rows, request_pb, retry
+ )
- table_api.drop_row_range.assert_called_once_with(
- request={"name": self.TABLE_NAME, "delete_all_data_from_table": True}
- )
+ # Perform the method and check the result.
+ start_key = b"start-key"
+ end_key = b"end-key"
+ filter_obj = object()
+ limit = 22
+ with _Monkey(MUT, _create_row_request=mock_create_row_request):
+ result = table.read_rows(
+ start_key=start_key,
+ end_key=end_key,
+ filter_=filter_obj,
+ limit=limit,
+ retry=retry,
+ )
+
+ assert result.rows == expected_result.rows
+ assert result.retry == expected_result.retry
+ created_kwargs = {
+ "start_key": start_key,
+ "end_key": end_key,
+ "filter_": filter_obj,
+ "limit": limit,
+ "end_inclusive": False,
+ "app_profile_id": app_profile_id,
+ "row_set": None,
+ }
+ assert mock_created == [(table.name, created_kwargs)]
+
+ data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0)
+
+
+def test_table_read_retry_rows():
+ from google.api_core import retry
+ from google.cloud.bigtable.table import _create_row_request
+
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ data_api = client._table_data_client = _make_data_api()
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+
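+    # A retry whose predicate presumably matches the errors raised by the
+    # mock failure iterators below, so each broken stream is retried.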
+ retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception)
+
+ # Create response_iterator
+ chunk_1 = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY_1,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
- self.assertEqual(result, expected_result)
+ chunk_2 = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY_2,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
- def test_truncate_w_timeout(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
+ response_1 = _ReadRowsResponseV2([chunk_1])
+ response_2 = _ReadRowsResponseV2([chunk_2])
+ response_failure_iterator_1 = _MockFailureIterator_1()
+ response_failure_iterator_2 = _MockFailureIterator_2([response_1])
+ response_iterator = _MockReadRowsIterator(response_2)
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
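+    # Table.name is resolved via the data client's table_path helper, so
+    # give the mock a concrete path to return.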
+ data_api.table_path.return_value = (
+ f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}"
+ )
+
+ data_api.read_rows.side_effect = [
+ response_failure_iterator_1,
+ response_failure_iterator_2,
+ response_iterator,
+ ]
+
+ rows = [
+ row
+ for row in table.read_rows(
+ start_key=ROW_KEY_1, end_key=ROW_KEY_2, retry=retry_read_rows
)
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
+ ]
- expected_result = None # truncate() has no return value.
+ result = rows[1]
+ assert result.row_key == ROW_KEY_2
- timeout = 120
- result = table.truncate(timeout=timeout)
+    # read_rows is re-invoked after each failing stream: three calls in all,
+    # the first of which uses the originally constructed request.
+    expected_request = _create_row_request(
+        table.name, start_key=ROW_KEY_1, end_key=ROW_KEY_2,
+    )
+    assert data_api.read_rows.call_count == 3
+    assert data_api.read_rows.mock_calls[0] == mock.call(
+        expected_request, timeout=mock.ANY
+    )
+
+
- self.assertEqual(result, expected_result)
- def test_drop_by_prefix(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
+def test_table_yield_retry_rows():
+ from google.cloud.bigtable.table import _create_row_request
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+
+ # Create response_iterator
+ chunk_1 = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY_1,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
+
+ chunk_2 = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY_2,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
- expected_result = None # drop_by_prefix() has no return value.
+ response_1 = _ReadRowsResponseV2([chunk_1])
+ response_2 = _ReadRowsResponseV2([chunk_2])
+ response_failure_iterator_1 = _MockFailureIterator_1()
+ response_failure_iterator_2 = _MockFailureIterator_2([response_1])
+ response_iterator = _MockReadRowsIterator(response_2)
- row_key_prefix = "row-key-prefix"
+ data_api = client._table_data_client = _make_data_api()
+ data_api.table_path.return_value = (
+ f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}"
+ )
+ data_api.read_rows.side_effect = [
+ response_failure_iterator_1,
+ response_failure_iterator_2,
+ response_iterator,
+ ]
+
+ rows = []
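+    # yield_rows is a deprecated alias for read_rows, so expect a single
+    # DeprecationWarning from the call.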
+ with warnings.catch_warnings(record=True) as warned:
+ for row in table.yield_rows(start_key=ROW_KEY_1, end_key=ROW_KEY_2):
+ rows.append(row)
- result = table.drop_by_prefix(row_key_prefix=row_key_prefix)
+ assert len(warned) == 1
+ assert warned[0].category is DeprecationWarning
- self.assertEqual(result, expected_result)
+ result = rows[1]
+ assert result.row_key == ROW_KEY_2
- def test_drop_by_prefix_w_timeout(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
+    # As above, read_rows is re-invoked after each failing stream; the first
+    # call uses the originally constructed request.
+    expected_request = _create_row_request(
+        table.name, start_key=ROW_KEY_1, end_key=ROW_KEY_2,
+    )
+    assert data_api.read_rows.call_count == 3
+    assert data_api.read_rows.mock_calls[0] == mock.call(
+        expected_request, timeout=mock.ANY
+    )
+
+
+def test_table_yield_rows_with_row_set():
+ from google.cloud.bigtable.row_set import RowSet
+ from google.cloud.bigtable.row_set import RowRange
+ from google.cloud.bigtable.table import _create_row_request
+
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+
+ # Create response_iterator
+ chunk_1 = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY_1,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
+ chunk_2 = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY_2,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
- expected_result = None # drop_by_prefix() has no return value.
+ chunk_3 = _ReadRowsResponseCellChunkPB(
+ row_key=ROW_KEY_3,
+ family_name=FAMILY_NAME,
+ qualifier=QUALIFIER,
+ timestamp_micros=TIMESTAMP_MICROS,
+ value=VALUE,
+ commit_row=True,
+ )
- row_key_prefix = "row-key-prefix"
+ response_1 = _ReadRowsResponseV2([chunk_1])
+ response_2 = _ReadRowsResponseV2([chunk_2])
+ response_3 = _ReadRowsResponseV2([chunk_3])
+ response_iterator = _MockReadRowsIterator(response_1, response_2, response_3)
- timeout = 120
- result = table.drop_by_prefix(row_key_prefix=row_key_prefix, timeout=timeout)
+ data_api = client._table_data_client = _make_data_api()
+ data_api.table_path.return_value = (
+ f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}"
+ )
+ data_api.read_rows.side_effect = [response_iterator]
- self.assertEqual(result, expected_result)
+ rows = []
+ row_set = RowSet()
+ row_set.add_row_range(RowRange(start_key=ROW_KEY_1, end_key=ROW_KEY_2))
+ row_set.add_row_key(ROW_KEY_3)
- def test_mutations_batcher_factory(self):
- flush_count = 100
- max_row_bytes = 1000
- table = self._make_one(self.TABLE_ID, None)
- mutation_batcher = table.mutations_batcher(
- flush_count=flush_count, max_row_bytes=max_row_bytes
- )
+ with warnings.catch_warnings(record=True) as warned:
+ for row in table.yield_rows(row_set=row_set):
+ rows.append(row)
- self.assertEqual(mutation_batcher.table.table_id, self.TABLE_ID)
- self.assertEqual(mutation_batcher.flush_count, flush_count)
- self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes)
+ assert len(warned) == 1
+ assert warned[0].category is DeprecationWarning
- def test_get_iam_policy(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- from google.iam.v1 import policy_pb2
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+ assert rows[0].row_key == ROW_KEY_1
+ assert rows[1].row_key == ROW_KEY_2
+ assert rows[2].row_key == ROW_KEY_3
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
+ expected_request = _create_row_request(
+ table.name, start_key=ROW_KEY_1, end_key=ROW_KEY_2,
+ )
+ expected_request.rows.row_keys.append(ROW_KEY_3)
+ data_api.read_rows.assert_called_once_with(expected_request, timeout=61.0)
+
+
- version = 1
- etag = b"etag_v1"
- members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
- bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
- iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- client._table_admin_client = table_api
- table_api.get_iam_policy.return_value = iam_policy
+def test_table_sample_row_keys():
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+ response_iterator = object()
- result = table.get_iam_policy()
+ data_api = client._table_data_client = _make_data_api()
+ data_api.sample_row_keys.return_value = [response_iterator]
- table_api.get_iam_policy.assert_called_once_with(
- request={"resource": table.name}
- )
- self.assertEqual(result.version, version)
- self.assertEqual(result.etag, etag)
- admins = result.bigtable_admins
- self.assertEqual(len(admins), len(members))
- for found, expected in zip(sorted(admins), sorted(members)):
- self.assertEqual(found, expected)
-
- def test_set_iam_policy(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- from google.iam.v1 import policy_pb2
- from google.cloud.bigtable.policy import Policy
- from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
+ result = table.sample_row_keys()
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
-
- version = 1
- etag = b"etag_v1"
- members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
- bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
- iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
-
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- client._table_admin_client = table_api
- table_api.set_iam_policy.return_value = iam_policy_pb
-
- iam_policy = Policy(etag=etag, version=version)
- iam_policy[BIGTABLE_ADMIN_ROLE] = [
- Policy.user("user1@test.com"),
- Policy.service_account("service_acc1@test.com"),
- ]
+ assert result[0] == response_iterator
+
+
- result = table.set_iam_policy(iam_policy)
- table_api.set_iam_policy.assert_called_once_with(
- request={"resource": table.name, "policy": iam_policy_pb}
- )
- self.assertEqual(result.version, version)
- self.assertEqual(result.etag, etag)
- admins = result.bigtable_admins
- self.assertEqual(len(admins), len(members))
- for found, expected in zip(sorted(admins), sorted(members)):
- self.assertEqual(found, expected)
-
- def test_test_iam_permissions(self):
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
- from google.iam.v1 import iam_policy_pb2
+def test_table_truncate():
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+ table_api = client._table_admin_client = _make_table_api()
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
+ with mock.patch("google.cloud.bigtable.table.Table.name", new=TABLE_NAME):
+ result = table.truncate()
+
+ assert result is None
- permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"]
+ table_api.drop_row_range.assert_called_once_with(
+ request={"name": TABLE_NAME, "delete_all_data_from_table": True}
+ )
+
+
- response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- table_api.test_iam_permissions.return_value = response
- client._table_admin_client = table_api
+def test_table_truncate_w_timeout():
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+ table_api = client._table_admin_client = _make_table_api()
- result = table.test_iam_permissions(permissions)
+ timeout = 120
+ result = table.truncate(timeout=timeout)
- self.assertEqual(result, permissions)
- table_api.test_iam_permissions.assert_called_once_with(
- request={"resource": table.name, "permissions": permissions}
- )
+ assert result is None
- def test_backup_factory_defaults(self):
- from google.cloud.bigtable.backup import Backup
-
- instance = self._make_one(self.INSTANCE_ID, None)
- table = self._make_one(self.TABLE_ID, instance)
- backup = table.backup(self.BACKUP_ID)
-
- self.assertIsInstance(backup, Backup)
- self.assertEqual(backup.backup_id, self.BACKUP_ID)
- self.assertIs(backup._instance, instance)
- self.assertIsNone(backup._cluster)
- self.assertEqual(backup.table_id, self.TABLE_ID)
- self.assertIsNone(backup._expire_time)
-
- self.assertIsNone(backup._parent)
- self.assertIsNone(backup._source_table)
- self.assertIsNone(backup._start_time)
- self.assertIsNone(backup._end_time)
- self.assertIsNone(backup._size_bytes)
- self.assertIsNone(backup._state)
-
- def test_backup_factory_non_defaults(self):
- import datetime
- from google.cloud._helpers import UTC
- from google.cloud.bigtable.backup import Backup
-
- instance = self._make_one(self.INSTANCE_ID, None)
- table = self._make_one(self.TABLE_ID, instance)
- timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC)
- backup = table.backup(
- self.BACKUP_ID, cluster_id=self.CLUSTER_ID, expire_time=timestamp,
- )
+ table_api.drop_row_range.assert_called_once_with(
+ request={"name": TABLE_NAME, "delete_all_data_from_table": True}, timeout=120,
+ )
+
+
- self.assertIsInstance(backup, Backup)
- self.assertEqual(backup.backup_id, self.BACKUP_ID)
- self.assertIs(backup._instance, instance)
-
- self.assertEqual(backup.backup_id, self.BACKUP_ID)
- self.assertIs(backup._cluster, self.CLUSTER_ID)
- self.assertEqual(backup.table_id, self.TABLE_ID)
- self.assertEqual(backup._expire_time, timestamp)
- self.assertIsNone(backup._start_time)
- self.assertIsNone(backup._end_time)
- self.assertIsNone(backup._size_bytes)
- self.assertIsNone(backup._state)
-
- def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs):
- from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
- BigtableInstanceAdminClient,
- )
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- BigtableTableAdminClient,
- )
- from google.cloud.bigtable_admin_v2.types import (
- bigtable_table_admin,
- Backup as backup_pb,
- )
- from google.cloud.bigtable.backup import Backup
- instance_api = mock.create_autospec(BigtableInstanceAdminClient)
- table_api = mock.create_autospec(BigtableTableAdminClient)
- client = self._make_client(
- project=self.PROJECT_ID, credentials=_make_credentials(), admin=True
- )
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_one(self.TABLE_ID, instance)
-
- client._instance_admin_client = instance_api
- client._table_admin_client = table_api
- table._instance._client._instance_admin_client = instance_api
- table._instance._client._table_admin_client = table_api
-
- parent = self.INSTANCE_NAME + "/clusters/cluster"
- backups_pb = bigtable_table_admin.ListBackupsResponse(
- backups=[
- backup_pb(name=parent + "/backups/op1"),
- backup_pb(name=parent + "/backups/op2"),
- backup_pb(name=parent + "/backups/op3"),
- ]
- )
+def test_table_drop_by_prefix():
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+ table_api = client._table_admin_client = _make_table_api()
- table_api.list_backups.return_value = backups_pb
- api = table._instance._client._table_admin_client.list_backups
+ row_key_prefix = b"row-key-prefix"
- backups_filter = "source_table:{}".format(self.TABLE_NAME)
- if filter_:
- backups_filter = "({}) AND ({})".format(backups_filter, filter_)
+ result = table.drop_by_prefix(row_key_prefix=row_key_prefix)
- backups = table.list_backups(cluster_id=cluster_id, filter_=filter_, **kwargs)
+ assert result is None
- for backup in backups:
- self.assertIsInstance(backup, Backup)
+ table_api.drop_row_range.assert_called_once_with(
+ request={"name": TABLE_NAME, "row_key_prefix": row_key_prefix},
+ )
+
+
- if not cluster_id:
- cluster_id = "-"
- parent = "{}/clusters/{}".format(self.INSTANCE_NAME, cluster_id)
- order_by = None
- page_size = 0
- if "order_by" in kwargs:
- order_by = kwargs["order_by"]
+def test_table_drop_by_prefix_w_timeout():
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+ table_api = client._table_admin_client = _make_table_api()
- if "page_size" in kwargs:
- page_size = kwargs["page_size"]
+ row_key_prefix = b"row-key-prefix"
- api.assert_called_once_with(
- request={
- "parent": parent,
- "filter": backups_filter,
- "order_by": order_by,
- "page_size": page_size,
- }
- )
+ timeout = 120
+ result = table.drop_by_prefix(row_key_prefix=row_key_prefix, timeout=timeout)
- def test_list_backups_defaults(self):
- self._list_backups_helper()
+ assert result is None
- def test_list_backups_w_options(self):
- self._list_backups_helper(
- cluster_id="cluster", filter_="filter", order_by="order_by", page_size=10
- )
+ table_api.drop_row_range.assert_called_once_with(
+ request={"name": TABLE_NAME, "row_key_prefix": row_key_prefix}, timeout=120,
+ )
+
+
- def _restore_helper(self, backup_name=None):
- from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
- from google.cloud.bigtable.instance import Instance
- op_future = object()
- credentials = _make_credentials()
- client = self._make_client(
- project=self.PROJECT_ID, credentials=credentials, admin=True
- )
+def test_table_mutations_batcher_factory():
+ flush_count = 100
+ max_row_bytes = 1000
+ table = _make_table(TABLE_ID, None)
+ mutation_batcher = table.mutations_batcher(
+ flush_count=flush_count, max_row_bytes=max_row_bytes
+ )
- instance = Instance(self.INSTANCE_ID, client=client)
- table = self._make_one(self.TABLE_ID, instance)
+ assert mutation_batcher.table.table_id == TABLE_ID
+ assert mutation_batcher.flush_count == flush_count
+ assert mutation_batcher.max_row_bytes == max_row_bytes
+
+
- api = client._table_admin_client = mock.create_autospec(
- BigtableTableAdminClient
- )
- api.restore_table.return_value = op_future
- table._instance._client._table_admin_client = api
+def test_table_get_iam_policy():
+ from google.iam.v1 import policy_pb2
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
- if backup_name:
- future = table.restore(self.TABLE_ID, backup_name=self.BACKUP_NAME)
- else:
- future = table.restore(self.TABLE_ID, self.CLUSTER_ID, self.BACKUP_ID)
- self.assertIs(future, op_future)
-
- api.restore_table.assert_called_once_with(
- request={
- "parent": self.INSTANCE_NAME,
- "table_id": self.TABLE_ID,
- "backup": self.BACKUP_NAME,
- }
- )
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+
+ version = 1
+ etag = b"etag_v1"
+ members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
+ bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
+ iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
+
+ table_api = client._table_admin_client = _make_table_api()
+ table_api.get_iam_policy.return_value = iam_policy
- def test_restore_table_w_backup_id(self):
- self._restore_helper()
+ result = table.get_iam_policy()
- def test_restore_table_w_backup_name(self):
- self._restore_helper(backup_name=self.BACKUP_NAME)
+ assert result.version == version
+ assert result.etag == etag
+ admins = result.bigtable_admins
+ assert len(admins) == len(members)
+ for found, expected in zip(sorted(admins), sorted(members)):
+ assert found == expected
-class Test__RetryableMutateRowsWorker(unittest.TestCase):
- from grpc import StatusCode
+ table_api.get_iam_policy.assert_called_once_with(request={"resource": table.name})
+
+
- PROJECT_ID = "project-id"
- INSTANCE_ID = "instance-id"
- INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
- TABLE_ID = "table-id"
- # RPC Status Codes
- SUCCESS = StatusCode.OK.value[0]
- RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0]
- RETRYABLE_2 = StatusCode.ABORTED.value[0]
- RETRYABLE_3 = StatusCode.UNAVAILABLE.value[0]
- RETRYABLES = (RETRYABLE_1, RETRYABLE_2, RETRYABLE_3)
- NON_RETRYABLE = StatusCode.CANCELLED.value[0]
+def test_table_set_iam_policy():
+ from google.iam.v1 import policy_pb2
+ from google.cloud.bigtable.policy import Policy
+ from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
- @staticmethod
- def _get_target_class_for_worker():
- from google.cloud.bigtable.table import _RetryableMutateRowsWorker
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
- return _RetryableMutateRowsWorker
+ version = 1
+ etag = b"etag_v1"
+ members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
+ bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
+ iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
- def _make_worker(self, *args, **kwargs):
- return self._get_target_class_for_worker()(*args, **kwargs)
+ table_api = client._table_admin_client = _make_table_api()
+ table_api.set_iam_policy.return_value = iam_policy_pb
- @staticmethod
- def _get_target_class_for_table():
- from google.cloud.bigtable.table import Table
+ iam_policy = Policy(etag=etag, version=version)
+ iam_policy[BIGTABLE_ADMIN_ROLE] = [
+ Policy.user("user1@test.com"),
+ Policy.service_account("service_acc1@test.com"),
+ ]
- return Table
+ result = table.set_iam_policy(iam_policy)
- def _make_table(self, *args, **kwargs):
- return self._get_target_class_for_table()(*args, **kwargs)
+ assert result.version == version
+ assert result.etag == etag
+ admins = result.bigtable_admins
+ assert len(admins) == len(members)
- @staticmethod
- def _get_target_client_class():
- from google.cloud.bigtable.client import Client
+ for found, expected in zip(sorted(admins), sorted(members)):
+ assert found == expected
+
+ table_api.set_iam_policy.assert_called_once_with(
+ request={"resource": table.name, "policy": iam_policy_pb}
+ )
+
+
- return Client
- def _make_client(self, *args, **kwargs):
- return self._get_target_client_class()(*args, **kwargs)
+def test_table_test_iam_permissions():
+ from google.iam.v1 import iam_policy_pb2
- def _make_responses_statuses(self, codes):
- from google.rpc.status_pb2 import Status
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
- response = [Status(code=code) for code in codes]
- return response
+ permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"]
- def _make_responses(self, codes):
- from google.cloud.bigtable_v2.types.bigtable import MutateRowsResponse
- from google.rpc.status_pb2 import Status
+ response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)
- entries = [
- MutateRowsResponse.Entry(index=i, status=Status(code=codes[i]))
- for i in range(len(codes))
+ table_api = client._table_admin_client = _make_table_api()
+ table_api.test_iam_permissions.return_value = response
+
+ result = table.test_iam_permissions(permissions)
+
+ assert result == permissions
+
+ table_api.test_iam_permissions.assert_called_once_with(
+ request={"resource": table.name, "permissions": permissions}
+ )
+
+
+def test_table_backup_factory_defaults():
+    from google.cloud.bigtable.backup import Backup
+    from google.cloud.bigtable.instance import Instance
+
+    instance = Instance(INSTANCE_ID, None)
+ table = _make_table(TABLE_ID, instance)
+ backup = table.backup(BACKUP_ID)
+
+ assert isinstance(backup, Backup)
+ assert backup.backup_id == BACKUP_ID
+ assert backup._instance is instance
+ assert backup._cluster is None
+ assert backup.table_id == TABLE_ID
+ assert backup._expire_time is None
+
+ assert backup._parent is None
+ assert backup._source_table is None
+ assert backup._start_time is None
+ assert backup._end_time is None
+ assert backup._size_bytes is None
+ assert backup._state is None
+
+
+def test_table_backup_factory_non_defaults():
+ import datetime
+ from google.cloud._helpers import UTC
+ from google.cloud.bigtable.backup import Backup
+ from google.cloud.bigtable.instance import Instance
+
+ instance = Instance(INSTANCE_ID, None)
+ table = _make_table(TABLE_ID, instance)
+ timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC)
+ backup = table.backup(BACKUP_ID, cluster_id=CLUSTER_ID, expire_time=timestamp,)
+
+ assert isinstance(backup, Backup)
+ assert backup.backup_id == BACKUP_ID
+ assert backup._instance is instance
+
+ assert backup.backup_id == BACKUP_ID
+ assert backup._cluster is CLUSTER_ID
+ assert backup.table_id == TABLE_ID
+ assert backup._expire_time == timestamp
+ assert backup._start_time is None
+ assert backup._end_time is None
+ assert backup._size_bytes is None
+ assert backup._state is None
+
+
+def _table_list_backups_helper(cluster_id=None, filter_=None, **kwargs):
+ from google.cloud.bigtable_admin_v2.types import (
+ Backup as backup_pb,
+ bigtable_table_admin,
+ )
+ from google.cloud.bigtable.backup import Backup
+
+ client = _make_client(
+ project=PROJECT_ID, credentials=_make_credentials(), admin=True
+ )
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+
+ parent = INSTANCE_NAME + "/clusters/cluster"
+ backups_pb = bigtable_table_admin.ListBackupsResponse(
+ backups=[
+ backup_pb(name=parent + "/backups/op1"),
+ backup_pb(name=parent + "/backups/op2"),
+ backup_pb(name=parent + "/backups/op3"),
]
- return MutateRowsResponse(entries=entries)
+ )
- def test_callable_empty_rows(self):
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
+ table_api = client._table_admin_client = _make_table_api()
+ table_api.list_backups.return_value = backups_pb
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_table(self.TABLE_ID, instance)
+ backups_filter = "source_table:{}".format(TABLE_NAME)
+ if filter_:
+ backups_filter = "({}) AND ({})".format(backups_filter, filter_)
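+    # list_backups scopes results to this table via a ``source_table`` filter
+    # and ANDs in any user-supplied filter.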
- worker = self._make_worker(client, table.name, [])
- statuses = worker()
+ backups = table.list_backups(cluster_id=cluster_id, filter_=filter_, **kwargs)
- self.assertEqual(len(statuses), 0)
+ for backup in backups:
+ assert isinstance(backup, Backup)
- def test_callable_no_retry_strategy(self):
- from google.cloud.bigtable.row import DirectRow
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
+ if not cluster_id:
+ cluster_id = "-"
+ parent = "{}/clusters/{}".format(INSTANCE_NAME, cluster_id)
- # Setup:
- # - Mutate 3 rows.
- # Action:
- # - Attempt to mutate the rows w/o any retry strategy.
- # Expectation:
- # - Since no retry, should return statuses as they come back.
- # - Even if there are retryable errors, no retry attempt is made.
- # - State of responses_statuses should be
- # [success, retryable, non-retryable]
-
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
-
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_table(self.TABLE_ID, instance)
+ order_by = None
+ page_size = 0
+ if "order_by" in kwargs:
+ order_by = kwargs["order_by"]
- row_1 = DirectRow(row_key=b"row_key", table=table)
- row_1.set_cell("cf", b"col", b"value1")
- row_2 = DirectRow(row_key=b"row_key_2", table=table)
- row_2.set_cell("cf", b"col", b"value2")
- row_3 = DirectRow(row_key=b"row_key_3", table=table)
- row_3.set_cell("cf", b"col", b"value3")
+ if "page_size" in kwargs:
+ page_size = kwargs["page_size"]
- response_codes = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
- response = self._make_responses(response_codes)
- data_api.mutate_rows = mock.MagicMock(return_value=[response])
+ table_api.list_backups.assert_called_once_with(
+ request={
+ "parent": parent,
+ "filter": backups_filter,
+ "order_by": order_by,
+ "page_size": page_size,
+ }
+ )
+
+
- table._instance._client._table_data_client = data_api
- table._instance._client._table_admin_client = table_api
- table._instance._client._table_data_client.mutate_rows.return_value = [response]
+def test_table_list_backups_defaults():
+ _table_list_backups_helper()
+
+
- worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
- statuses = worker(retry=None)
- result = [status.code for status in statuses]
- self.assertEqual(result, response_codes)
+def test_table_list_backups_w_options():
+ _table_list_backups_helper(
+ cluster_id="cluster", filter_="filter", order_by="order_by", page_size=10
+ )
+
+
- data_api.mutate_rows.assert_called_once()
- def test_callable_retry(self):
- from google.cloud.bigtable.row import DirectRow
- from google.cloud.bigtable.table import DEFAULT_RETRY
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
- client as bigtable_table_admin,
- )
+def _table_restore_helper(backup_name=None):
+ from google.cloud.bigtable.instance import Instance
- # Setup:
- # - Mutate 3 rows.
- # Action:
- # - Initial attempt will mutate all 3 rows.
- # Expectation:
- # - First attempt will result in one retryable error.
- # - Second attempt will result in success for the retry-ed row.
- # - Check MutateRows is called twice.
- # - State of responses_statuses should be
- # [success, success, non-retryable]
-
- data_api = mock.create_autospec(BigtableClient)
- table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
-
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- client._table_admin_client = table_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_table(self.TABLE_ID, instance)
- row_1 = DirectRow(row_key=b"row_key", table=table)
- row_1.set_cell("cf", b"col", b"value1")
- row_2 = DirectRow(row_key=b"row_key_2", table=table)
- row_2.set_cell("cf", b"col", b"value2")
- row_3 = DirectRow(row_key=b"row_key_3", table=table)
- row_3.set_cell("cf", b"col", b"value3")
-
- response_1 = self._make_responses(
- [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
- )
- response_2 = self._make_responses([self.SUCCESS])
+ op_future = object()
+ credentials = _make_credentials()
+ client = _make_client(project=PROJECT_ID, credentials=credentials, admin=True)
- # Patch the stub used by the API method.
- client._table_data_client.mutate_rows.side_effect = [[response_1], [response_2]]
- table._instance._client._table_data_client = data_api
- table._instance._client._table_admin_client = table_api
+ instance = Instance(INSTANCE_ID, client=client)
+ table = _make_table(TABLE_ID, instance)
- retry = DEFAULT_RETRY.with_delay(initial=0.1)
- worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
- statuses = worker(retry=retry)
+ table_api = client._table_admin_client = _make_table_api()
+ table_api.restore_table.return_value = op_future
- result = [status.code for status in statuses]
- expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE]
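+    # restore() accepts either an explicit backup_name or a
+    # (cluster_id, backup_id) pair; both resolve to the same backup resource.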
+ if backup_name:
+ future = table.restore(TABLE_ID, backup_name=BACKUP_NAME)
+ else:
+ future = table.restore(TABLE_ID, CLUSTER_ID, BACKUP_ID)
- self.assertEqual(client._table_data_client.mutate_rows.call_count, 2)
- self.assertEqual(result, expected_result)
+ assert future is op_future
- def _do_mutate_retryable_rows_helper(
- self,
- row_cells,
- responses,
- prior_statuses=None,
- expected_result=None,
- raising_retry=False,
- retryable_error=False,
- timeout=None,
- ):
- from google.api_core.exceptions import ServiceUnavailable
- from google.cloud.bigtable.row import DirectRow
- from google.cloud.bigtable.table import _BigtableRetryableError
- from google.cloud.bigtable_v2.services.bigtable import BigtableClient
- from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2
-
- # Setup:
- # - Mutate 2 rows.
- # Action:
- # - Initial attempt will mutate all 2 rows.
- # Expectation:
- # - Expect [success, non-retryable]
-
- data_api = mock.create_autospec(BigtableClient)
-
- credentials = _make_credentials()
- client = self._make_client(
- project="project-id", credentials=credentials, admin=True
- )
- client._table_data_client = data_api
- instance = client.instance(instance_id=self.INSTANCE_ID)
- table = self._make_table(self.TABLE_ID, instance)
-
- rows = []
- for row_key, cell_data in row_cells:
- row = DirectRow(row_key=row_key, table=table)
- row.set_cell(*cell_data)
- rows.append(row)
+ expected_request = {
+ "parent": INSTANCE_NAME,
+ "table_id": TABLE_ID,
+ "backup": BACKUP_NAME,
+ }
+ table_api.restore_table.assert_called_once_with(request=expected_request)
+
+
- response = self._make_responses(responses)
- if retryable_error:
- data_api.mutate_rows.side_effect = ServiceUnavailable("testing")
- else:
- data_api.mutate_rows.side_effect = [[response]]
+def test_table_restore_table_w_backup_id():
+ _table_restore_helper()
+
+
- worker = self._make_worker(client, table.name, rows=rows)
- if prior_statuses is not None:
- assert len(prior_statuses) == len(rows)
- worker.responses_statuses = self._make_responses_statuses(prior_statuses)
- expected_entries = []
- for row, prior_status in zip(rows, worker.responses_statuses):
+def test_table_restore_table_w_backup_name():
+ _table_restore_helper(backup_name=BACKUP_NAME)
+
+
- if prior_status is None or prior_status.code in self.RETRYABLES:
- mutations = row._get_mutations().copy() # row clears on success
- entry = data_messages_v2_pb2.MutateRowsRequest.Entry(
- row_key=row.row_key, mutations=mutations,
- )
- expected_entries.append(entry)
- expected_kwargs = {}
- if timeout is not None:
- worker.timeout = timeout
- expected_kwargs["timeout"] = mock.ANY
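+# Construct the _RetryableMutateRowsWorker under test: a callable that
+# re-sends mutations for rows whose previous status was retryable.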
+def _make_worker(*args, **kwargs):
+ from google.cloud.bigtable.table import _RetryableMutateRowsWorker
- if retryable_error or raising_retry:
- with self.assertRaises(_BigtableRetryableError):
- worker._do_mutate_retryable_rows()
- statuses = worker.responses_statuses
- else:
- statuses = worker._do_mutate_retryable_rows()
+ return _RetryableMutateRowsWorker(*args, **kwargs)
+
+
- if not retryable_error:
- result = [status.code for status in statuses]
- if expected_result is None:
- expected_result = responses
+def _make_responses_statuses(codes):
+ from google.rpc.status_pb2 import Status
- self.assertEqual(result, expected_result)
+ response = [Status(code=code) for code in codes]
+ return response
+
- if len(responses) == 0 and not retryable_error:
- data_api.mutate_rows.assert_not_called()
- else:
- data_api.mutate_rows.assert_called_once_with(
- table_name=table.name,
- entries=expected_entries,
- app_profile_id=None,
- retry=None,
- **expected_kwargs,
+
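+# Build a single MutateRowsResponse whose entries carry the given status
+# codes in order, mimicking the per-row results of a MutateRows stream.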
+def _make_responses(codes):
+ from google.cloud.bigtable_v2.types.bigtable import MutateRowsResponse
+ from google.rpc.status_pb2 import Status
+
+ entries = [
+ MutateRowsResponse.Entry(index=i, status=Status(code=codes[i]))
+ for i in range(len(codes))
+ ]
+ return MutateRowsResponse(entries=entries)
+
+
+def test_rmrw_callable_empty_rows():
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+ data_api = client._table_data_client = _make_data_api()
+ data_api.mutate_rows.return_value = []
+ data_api.table_path.return_value = (
+ f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}"
+ )
+
+ worker = _make_worker(client, table.name, [])
+ statuses = worker()
+
+ assert len(statuses) == 0
+
+
+def test_rmrw_callable_no_retry_strategy():
+ from google.cloud.bigtable.row import DirectRow
+
+ # Setup:
+ # - Mutate 3 rows.
+ # Action:
+ # - Attempt to mutate the rows w/o any retry strategy.
+ # Expectation:
+ # - Since no retry, should return statuses as they come back.
+ # - Even if there are retryable errors, no retry attempt is made.
+ # - State of responses_statuses should be
+ # [success, retryable, non-retryable]
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+
+ row_1 = DirectRow(row_key=b"row_key", table=table)
+ row_1.set_cell("cf", b"col", b"value1")
+ row_2 = DirectRow(row_key=b"row_key_2", table=table)
+ row_2.set_cell("cf", b"col", b"value2")
+ row_3 = DirectRow(row_key=b"row_key_3", table=table)
+ row_3.set_cell("cf", b"col", b"value3")
+
+ response_codes = [SUCCESS, RETRYABLE_1, NON_RETRYABLE]
+ response = _make_responses(response_codes)
+
+ data_api = client._table_data_client = _make_data_api()
+ data_api.mutate_rows.return_value = [response]
+ data_api.table_path.return_value = (
+ f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}"
+ )
+ worker = _make_worker(client, table.name, [row_1, row_2, row_3])
+
+ statuses = worker(retry=None)
+
+ result = [status.code for status in statuses]
+ assert result == response_codes
+
+ data_api.mutate_rows.assert_called_once()
+
+
+def test_rmrw_callable_retry():
+ from google.cloud.bigtable.row import DirectRow
+ from google.cloud.bigtable.table import DEFAULT_RETRY
+
+ # Setup:
+ # - Mutate 3 rows.
+ # Action:
+ # - Initial attempt will mutate all 3 rows.
+ # Expectation:
+ # - First attempt will result in one retryable error.
+    # - Second attempt will result in success for the retried row.
+ # - Check MutateRows is called twice.
+ # - State of responses_statuses should be
+ # [success, success, non-retryable]
+
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+ row_1 = DirectRow(row_key=b"row_key", table=table)
+ row_1.set_cell("cf", b"col", b"value1")
+ row_2 = DirectRow(row_key=b"row_key_2", table=table)
+ row_2.set_cell("cf", b"col", b"value2")
+ row_3 = DirectRow(row_key=b"row_key_3", table=table)
+ row_3.set_cell("cf", b"col", b"value3")
+
+ response_1 = _make_responses([SUCCESS, RETRYABLE_1, NON_RETRYABLE])
+ response_2 = _make_responses([SUCCESS])
+ data_api = client._table_data_client = _make_data_api()
+ data_api.mutate_rows.side_effect = [[response_1], [response_2]]
+ data_api.table_path.return_value = (
+ f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}"
+ )
+ worker = _make_worker(client, table.name, [row_1, row_2, row_3])
+ retry = DEFAULT_RETRY.with_delay(initial=0.1)
+
+ statuses = worker(retry=retry)
+
+ result = [status.code for status in statuses]
+
+ assert result == [SUCCESS, SUCCESS, NON_RETRYABLE]
+
+ assert client._table_data_client.mutate_rows.call_count == 2
+
+
+def _do_mutate_retryable_rows_helper(
+ row_cells,
+ responses,
+ prior_statuses=None,
+ expected_result=None,
+ raising_retry=False,
+ retryable_error=False,
+ timeout=None,
+):
+ from google.api_core.exceptions import ServiceUnavailable
+ from google.cloud.bigtable.row import DirectRow
+ from google.cloud.bigtable.table import _BigtableRetryableError
+ from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2
+
+    # Shared helper:
+    # - ``row_cells``: (row_key, (family, qualifier, value)) tuples used to
+    #   build the ``DirectRow``s to mutate.
+    # - ``responses``: per-row status codes the mocked MutateRows returns.
+    # - ``prior_statuses``: statuses from a simulated earlier attempt; only
+    #   rows without a status, or with a retryable one, are re-sent.
+    # - ``raising_retry`` / ``retryable_error``: expect the call to raise
+    #   ``_BigtableRetryableError`` instead of returning statuses.
+
+ credentials = _make_credentials()
+ client = _make_client(project="project-id", credentials=credentials, admin=True)
+ instance = client.instance(instance_id=INSTANCE_ID)
+ table = _make_table(TABLE_ID, instance)
+
+ rows = []
+ for row_key, cell_data in row_cells:
+ row = DirectRow(row_key=row_key, table=table)
+ row.set_cell(*cell_data)
+ rows.append(row)
+
+ response = _make_responses(responses)
+
+ data_api = client._table_data_client = _make_data_api()
+ if retryable_error:
+ data_api.mutate_rows.side_effect = ServiceUnavailable("testing")
+ else:
+ data_api.mutate_rows.return_value = [response]
+
+ worker = _make_worker(client, table.name, rows=rows)
+
+ if prior_statuses is not None:
+ assert len(prior_statuses) == len(rows)
+ worker.responses_statuses = _make_responses_statuses(prior_statuses)
+
+ expected_entries = []
+ for row, prior_status in zip(rows, worker.responses_statuses):
+
+ if prior_status is None or prior_status.code in RETRYABLES:
+ mutations = row._get_mutations().copy() # row clears on success
+ entry = data_messages_v2_pb2.MutateRowsRequest.Entry(
+ row_key=row.row_key, mutations=mutations,
)
- if timeout is not None:
- called = data_api.mutate_rows.mock_calls[0]
- self.assertEqual(called.kwargs["timeout"]._deadline, timeout)
-
- def test_do_mutate_retryable_rows_empty_rows(self):
- #
- # Setup:
- # - No mutated rows.
- # Action:
- # - No API call made.
- # Expectation:
- # - No change.
- #
- row_cells = []
- responses = []
-
- self._do_mutate_retryable_rows_helper(row_cells, responses)
-
- def test_do_mutate_retryable_rows_w_timeout(self):
- #
- # Setup:
- # - Mutate 2 rows.
- # Action:
- # - Initial attempt will mutate all 2 rows.
- # Expectation:
- # - No retryable error codes, so don't expect a raise.
- # - State of responses_statuses should be [success, non-retryable].
- #
- row_cells = [
- (b"row_key_1", ("cf", b"col", b"value1")),
- (b"row_key_2", ("cf", b"col", b"value2")),
- ]
+ expected_entries.append(entry)
- responses = [self.SUCCESS, self.NON_RETRYABLE]
+ expected_kwargs = {}
+ if timeout is not None:
+ worker.timeout = timeout
+ expected_kwargs["timeout"] = mock.ANY
- timeout = 5 # seconds
+ if retryable_error or raising_retry:
+ with pytest.raises(_BigtableRetryableError):
+ worker._do_mutate_retryable_rows()
+ statuses = worker.responses_statuses
+ else:
+ statuses = worker._do_mutate_retryable_rows()
- self._do_mutate_retryable_rows_helper(
- row_cells, responses, timeout=timeout,
- )
+ if not retryable_error:
+ result = [status.code for status in statuses]
- def test_do_mutate_retryable_rows_w_retryable_error(self):
- #
- # Setup:
- # - Mutate 2 rows.
- # Action:
- # - Initial attempt will mutate all 2 rows.
- # Expectation:
- # - No retryable error codes, so don't expect a raise.
- # - State of responses_statuses should be [success, non-retryable].
- #
- row_cells = [
- (b"row_key_1", ("cf", b"col", b"value1")),
- (b"row_key_2", ("cf", b"col", b"value2")),
- ]
+ if expected_result is None:
+ expected_result = responses
- responses = ()
+ assert result == expected_result
- self._do_mutate_retryable_rows_helper(
- row_cells, responses, retryable_error=True,
+ if len(responses) == 0 and not retryable_error:
+ data_api.mutate_rows.assert_not_called()
+ else:
+ data_api.mutate_rows.assert_called_once_with(
+ table_name=table.name,
+ entries=expected_entries,
+ app_profile_id=None,
+ retry=None,
+ **expected_kwargs,
)
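+    # The worker wraps a numeric ``timeout`` in a timeout object, so compare
+    # against its ``_deadline`` rather than the raw number.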
+ if timeout is not None:
+ called = data_api.mutate_rows.mock_calls[0]
+ assert called.kwargs["timeout"]._deadline == timeout
+
+
+def test_rmrw_do_mutate_retryable_rows_empty_rows():
+ #
+ # Setup:
+ # - No mutated rows.
+ # Action:
+ # - No API call made.
+ # Expectation:
+ # - No change.
+ #
+ row_cells = []
+ responses = []
+
+ _do_mutate_retryable_rows_helper(row_cells, responses)
+
+
+def test_rmrw_do_mutate_retryable_rows_w_timeout():
+ #
+ # Setup:
+ # - Mutate 2 rows.
+ # Action:
+ # - Initial attempt will mutate all 2 rows.
+ # Expectation:
+ # - No retryable error codes, so don't expect a raise.
+ # - State of responses_statuses should be [success, non-retryable].
+ #
+ row_cells = [
+ (b"row_key_1", ("cf", b"col", b"value1")),
+ (b"row_key_2", ("cf", b"col", b"value2")),
+ ]
+
+ responses = [SUCCESS, NON_RETRYABLE]
+
+ timeout = 5 # seconds
+
+ _do_mutate_retryable_rows_helper(
+ row_cells, responses, timeout=timeout,
+ )
+
+
- def test_do_mutate_retryable_rows_retry(self):
- #
- # Setup:
- # - Mutate 3 rows.
- # Action:
- # - Initial attempt will mutate all 3 rows.
- # Expectation:
- # - Second row returns retryable error code, so expect a raise.
- # - State of responses_statuses should be
- # [success, retryable, non-retryable]
- #
- row_cells = [
- (b"row_key_1", ("cf", b"col", b"value1")),
- (b"row_key_2", ("cf", b"col", b"value2")),
- (b"row_key_3", ("cf", b"col", b"value3")),
- ]
- responses = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
+def test_rmrw_do_mutate_retryable_rows_w_retryable_error():
+    #
+    # Setup:
+    # - Mutate 2 rows.
+    # Action:
+    # - The MutateRows RPC itself fails with a retryable ServiceUnavailable.
+    # Expectation:
+    # - ``_BigtableRetryableError`` is raised so the attempt is retried;
+    #   no per-row statuses are recorded.
+    #
+ row_cells = [
+ (b"row_key_1", ("cf", b"col", b"value1")),
+ (b"row_key_2", ("cf", b"col", b"value2")),
+ ]
+
+ responses = ()
+
+ _do_mutate_retryable_rows_helper(
+ row_cells, responses, retryable_error=True,
+ )
+
+
- self._do_mutate_retryable_rows_helper(
- row_cells, responses, raising_retry=True,
- )
- def test_do_mutate_retryable_rows_second_retry(self):
- #
- # Setup:
- # - Mutate 4 rows.
- # - First try results:
- # [success, retryable, non-retryable, retryable]
- # Action:
- # - Second try should re-attempt the 'retryable' rows.
- # Expectation:
- # - After second try:
- # [success, success, non-retryable, retryable]
- # - One of the rows tried second time returns retryable error code,
- # so expect a raise.
- # - Exception contains response whose index should be '3' even though
- # only two rows were retried.
- #
- row_cells = [
- (b"row_key_1", ("cf", b"col", b"value1")),
- (b"row_key_2", ("cf", b"col", b"value2")),
- (b"row_key_3", ("cf", b"col", b"value3")),
- (b"row_key_4", ("cf", b"col", b"value4")),
- ]
+def test_rmrw_do_mutate_retryable_rows_retry():
+ #
+ # Setup:
+ # - Mutate 3 rows.
+ # Action:
+ # - Initial attempt will mutate all 3 rows.
+ # Expectation:
+ # - Second row returns retryable error code, so expect a raise.
+ # - State of responses_statuses should be
+ # [success, retryable, non-retryable]
+ #
+ row_cells = [
+ (b"row_key_1", ("cf", b"col", b"value1")),
+ (b"row_key_2", ("cf", b"col", b"value2")),
+ (b"row_key_3", ("cf", b"col", b"value3")),
+ ]
+
+ responses = [SUCCESS, RETRYABLE_1, NON_RETRYABLE]
+
+ _do_mutate_retryable_rows_helper(
+ row_cells, responses, raising_retry=True,
+ )
+
+
- responses = [self.SUCCESS, self.RETRYABLE_1]
- prior_statuses = [
- self.SUCCESS,
- self.RETRYABLE_1,
- self.NON_RETRYABLE,
- self.RETRYABLE_2,
- ]
+def test_rmrw_do_mutate_retryable_rows_second_retry():
+ #
+ # Setup:
+ # - Mutate 4 rows.
+ # - First try results:
+ # [success, retryable, non-retryable, retryable]
+ # Action:
+ # - Second try should re-attempt the 'retryable' rows.
+ # Expectation:
+ # - After second try:
+ # [success, success, non-retryable, retryable]
+ # - One of the rows tried second time returns retryable error code,
+ # so expect a raise.
+ # - Exception contains response whose index should be '3' even though
+ # only two rows were retried.
+ #
+ row_cells = [
+ (b"row_key_1", ("cf", b"col", b"value1")),
+ (b"row_key_2", ("cf", b"col", b"value2")),
+ (b"row_key_3", ("cf", b"col", b"value3")),
+ (b"row_key_4", ("cf", b"col", b"value4")),
+ ]
+
+ responses = [SUCCESS, RETRYABLE_1]
+
+ prior_statuses = [
+ SUCCESS,
+ RETRYABLE_1,
+ NON_RETRYABLE,
+ RETRYABLE_2,
+ ]
+
+ expected_result = [
+ SUCCESS,
+ SUCCESS,
+ NON_RETRYABLE,
+ RETRYABLE_1,
+ ]
+
+ _do_mutate_retryable_rows_helper(
+ row_cells,
+ responses,
+ prior_statuses=prior_statuses,
+ expected_result=expected_result,
+ raising_retry=True,
+ )
+
+
- expected_result = [
- self.SUCCESS,
- self.SUCCESS,
- self.NON_RETRYABLE,
- self.RETRYABLE_1,
- ]
- self._do_mutate_retryable_rows_helper(
- row_cells,
- responses,
- prior_statuses=prior_statuses,
- expected_result=expected_result,
- raising_retry=True,
- )
+def test_rmrw_do_mutate_retryable_rows_second_try():
+ #
+ # Setup:
+ # - Mutate 4 rows.
+ # - First try results:
+ # [success, retryable, non-retryable, retryable]
+ # Action:
+ # - Second try should re-attempt the 'retryable' rows.
+ # Expectation:
+ # - After second try:
+ # [success, non-retryable, non-retryable, success]
+ #
+ row_cells = [
+ (b"row_key_1", ("cf", b"col", b"value1")),
+ (b"row_key_2", ("cf", b"col", b"value2")),
+ (b"row_key_3", ("cf", b"col", b"value3")),
+ (b"row_key_4", ("cf", b"col", b"value4")),
+ ]
+
+ responses = [NON_RETRYABLE, SUCCESS]
+
+ prior_statuses = [
+ SUCCESS,
+ RETRYABLE_1,
+ NON_RETRYABLE,
+ RETRYABLE_2,
+ ]
+
+ expected_result = [
+ SUCCESS,
+ NON_RETRYABLE,
+ NON_RETRYABLE,
+ SUCCESS,
+ ]
+
+ _do_mutate_retryable_rows_helper(
+ row_cells,
+ responses,
+ prior_statuses=prior_statuses,
+ expected_result=expected_result,
+ )
+
+
- def test_do_mutate_retryable_rows_second_try(self):
- #
- # Setup:
- # - Mutate 4 rows.
- # - First try results:
- # [success, retryable, non-retryable, retryable]
- # Action:
- # - Second try should re-attempt the 'retryable' rows.
- # Expectation:
- # - After second try:
- # [success, non-retryable, non-retryable, success]
- #
- row_cells = [
- (b"row_key_1", ("cf", b"col", b"value1")),
- (b"row_key_2", ("cf", b"col", b"value2")),
- (b"row_key_3", ("cf", b"col", b"value3")),
- (b"row_key_4", ("cf", b"col", b"value4")),
- ]
- responses = [self.NON_RETRYABLE, self.SUCCESS]
+def test_rmrw_do_mutate_retryable_rows_second_try_no_retryable():
+ #
+ # Setup:
+ # - Mutate 2 rows.
+ # - First try results: [success, non-retryable]
+ # Action:
+ # - Second try has no row to retry.
+ # Expectation:
+ # - After second try: [success, non-retryable]
+ #
+ row_cells = [
+ (b"row_key_1", ("cf", b"col", b"value1")),
+ (b"row_key_2", ("cf", b"col", b"value2")),
+ ]
+
+ responses = [] # no calls will be made
+
+ prior_statuses = [
+ SUCCESS,
+ NON_RETRYABLE,
+ ]
+
+ expected_result = [
+ SUCCESS,
+ NON_RETRYABLE,
+ ]
+
+ _do_mutate_retryable_rows_helper(
+ row_cells,
+ responses,
+ prior_statuses=prior_statuses,
+ expected_result=expected_result,
+ )
+
+
- prior_statuses = [
- self.SUCCESS,
- self.RETRYABLE_1,
- self.NON_RETRYABLE,
- self.RETRYABLE_2,
- ]
- expected_result = [
- self.SUCCESS,
- self.NON_RETRYABLE,
- self.NON_RETRYABLE,
- self.SUCCESS,
- ]
+def test_rmrw_do_mutate_retryable_rows_mismatch_num_responses():
+ row_cells = [
+ (b"row_key_1", ("cf", b"col", b"value1")),
+ (b"row_key_2", ("cf", b"col", b"value2")),
+ ]
- self._do_mutate_retryable_rows_helper(
- row_cells,
- responses,
- prior_statuses=prior_statuses,
- expected_result=expected_result,
- )
+ responses = [SUCCESS]
- def test_do_mutate_retryable_rows_second_try_no_retryable(self):
- #
- # Setup:
- # - Mutate 2 rows.
- # - First try results: [success, non-retryable]
- # Action:
- # - Second try has no row to retry.
- # Expectation:
- # - After second try: [success, non-retryable]
- #
- row_cells = [
- (b"row_key_1", ("cf", b"col", b"value1")),
- (b"row_key_2", ("cf", b"col", b"value2")),
- ]
+ with pytest.raises(RuntimeError):
+ _do_mutate_retryable_rows_helper(row_cells, responses)
+
+
- responses = [] # no calls will be made
- prior_statuses = [
- self.SUCCESS,
- self.NON_RETRYABLE,
- ]
+def test__create_row_request_table_name_only():
+ from google.cloud.bigtable.table import _create_row_request
- expected_result = [
- self.SUCCESS,
- self.NON_RETRYABLE,
- ]
+ table_name = "table_name"
+ result = _create_row_request(table_name)
+ expected_result = _ReadRowsRequestPB(table_name=table_name)
+ assert result == expected_result
+
+
- self._do_mutate_retryable_rows_helper(
- row_cells,
- responses,
- prior_statuses=prior_statuses,
- expected_result=expected_result,
- )
- def test_do_mutate_retryable_rows_mismatch_num_responses(self):
- row_cells = [
- (b"row_key_1", ("cf", b"col", b"value1")),
- (b"row_key_2", ("cf", b"col", b"value2")),
- ]
+def test__create_row_request_row_range_row_set_conflict():
+ from google.cloud.bigtable.table import _create_row_request
- responses = [self.SUCCESS]
+ with pytest.raises(ValueError):
+ _create_row_request(None, end_key=object(), row_set=object())
+
+
- with self.assertRaises(RuntimeError):
- self._do_mutate_retryable_rows_helper(row_cells, responses)
+def test__create_row_request_row_range_start_key():
+ from google.cloud.bigtable.table import _create_row_request
+ from google.cloud.bigtable_v2.types import RowRange
-class Test__create_row_request(unittest.TestCase):
- def _call_fut(
- self,
- table_name,
- start_key=None,
- end_key=None,
- filter_=None,
- limit=None,
- end_inclusive=False,
- app_profile_id=None,
- row_set=None,
- ):
+ table_name = "table_name"
+ start_key = b"start_key"
+ result = _create_row_request(table_name, start_key=start_key)
+ expected_result = _ReadRowsRequestPB(table_name=table_name)
+ row_range = RowRange(start_key_closed=start_key)
+ expected_result.rows.row_ranges.append(row_range)
+ assert result == expected_result
+
+
- from google.cloud.bigtable.table import _create_row_request
- return _create_row_request(
- table_name,
- start_key=start_key,
- end_key=end_key,
- filter_=filter_,
- limit=limit,
- end_inclusive=end_inclusive,
- app_profile_id=app_profile_id,
- row_set=row_set,
- )
+def test__create_row_request_row_range_end_key():
+ from google.cloud.bigtable.table import _create_row_request
+ from google.cloud.bigtable_v2.types import RowRange
- def test_table_name_only(self):
- table_name = "table_name"
- result = self._call_fut(table_name)
- expected_result = _ReadRowsRequestPB(table_name=table_name)
- self.assertEqual(result, expected_result)
-
- def test_row_range_row_set_conflict(self):
- with self.assertRaises(ValueError):
- self._call_fut(None, end_key=object(), row_set=object())
-
- def test_row_range_start_key(self):
- from google.cloud.bigtable_v2.types import RowRange
-
- table_name = "table_name"
- start_key = b"start_key"
- result = self._call_fut(table_name, start_key=start_key)
- expected_result = _ReadRowsRequestPB(table_name=table_name)
- row_range = RowRange(start_key_closed=start_key)
- expected_result.rows.row_ranges.append(row_range)
- self.assertEqual(result, expected_result)
-
- def test_row_range_end_key(self):
- from google.cloud.bigtable_v2.types import RowRange
-
- table_name = "table_name"
- end_key = b"end_key"
- result = self._call_fut(table_name, end_key=end_key)
- expected_result = _ReadRowsRequestPB(table_name=table_name)
- row_range = RowRange(end_key_open=end_key)
- expected_result.rows.row_ranges.append(row_range)
- self.assertEqual(result, expected_result)
-
- def test_row_range_both_keys(self):
- from google.cloud.bigtable_v2.types import RowRange
-
- table_name = "table_name"
- start_key = b"start_key"
- end_key = b"end_key"
- result = self._call_fut(table_name, start_key=start_key, end_key=end_key)
- row_range = RowRange(start_key_closed=start_key, end_key_open=end_key)
- expected_result = _ReadRowsRequestPB(table_name=table_name)
- expected_result.rows.row_ranges.append(row_range)
- self.assertEqual(result, expected_result)
-
- def test_row_range_both_keys_inclusive(self):
- from google.cloud.bigtable_v2.types import RowRange
-
- table_name = "table_name"
- start_key = b"start_key"
- end_key = b"end_key"
- result = self._call_fut(
- table_name, start_key=start_key, end_key=end_key, end_inclusive=True
- )
- expected_result = _ReadRowsRequestPB(table_name=table_name)
- row_range = RowRange(start_key_closed=start_key, end_key_closed=end_key)
- expected_result.rows.row_ranges.append(row_range)
- self.assertEqual(result, expected_result)
-
- def test_with_filter(self):
- from google.cloud.bigtable.row_filters import RowSampleFilter
-
- table_name = "table_name"
- row_filter = RowSampleFilter(0.33)
- result = self._call_fut(table_name, filter_=row_filter)
- expected_result = _ReadRowsRequestPB(
- table_name=table_name, filter=row_filter.to_pb()
- )
- self.assertEqual(result, expected_result)
-
- def test_with_limit(self):
- table_name = "table_name"
- limit = 1337
- result = self._call_fut(table_name, limit=limit)
- expected_result = _ReadRowsRequestPB(table_name=table_name, rows_limit=limit)
- self.assertEqual(result, expected_result)
-
- def test_with_row_set(self):
- from google.cloud.bigtable.row_set import RowSet
-
- table_name = "table_name"
- row_set = RowSet()
- result = self._call_fut(table_name, row_set=row_set)
- expected_result = _ReadRowsRequestPB(table_name=table_name)
- self.assertEqual(result, expected_result)
-
- def test_with_app_profile_id(self):
- table_name = "table_name"
- limit = 1337
- app_profile_id = "app-profile-id"
- result = self._call_fut(table_name, limit=limit, app_profile_id=app_profile_id)
- expected_result = _ReadRowsRequestPB(
- table_name=table_name, rows_limit=limit, app_profile_id=app_profile_id
- )
- self.assertEqual(result, expected_result)
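+    # A lone end_key maps to end_key_open, so the range excludes
+    # end_key itself.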
+ table_name = "table_name"
+ end_key = b"end_key"
+ result = _create_row_request(table_name, end_key=end_key)
+ expected_result = _ReadRowsRequestPB(table_name=table_name)
+ row_range = RowRange(end_key_open=end_key)
+ expected_result.rows.row_ranges.append(row_range)
+ assert result == expected_result
+
+
+def test__create_row_request_row_range_both_keys():
+ from google.cloud.bigtable.table import _create_row_request
+ from google.cloud.bigtable_v2.types import RowRange
+
+ table_name = "table_name"
+ start_key = b"start_key"
+ end_key = b"end_key"
+ result = _create_row_request(table_name, start_key=start_key, end_key=end_key)
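+    # With both keys and the default end_inclusive=False, the range is
+    # [start_key, end_key): closed at the start, open at the end.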
+    row_range = RowRange(start_key_closed=start_key, end_key_open=end_key)
+    expected_result = _ReadRowsRequestPB(table_name=table_name)
+    expected_result.rows.row_ranges.append(row_range)
+    assert result == expected_result
+
+
+def test__create_row_request_row_range_both_keys_inclusive():
+    from google.cloud.bigtable.table import _create_row_request
+    from google.cloud.bigtable_v2.types import RowRange
+
+    table_name = "table_name"
+    start_key = b"start_key"
+    end_key = b"end_key"
+    result = _create_row_request(
+        table_name, start_key=start_key, end_key=end_key, end_inclusive=True
+    )
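+    # end_inclusive=True switches the end bound to end_key_closed.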
+    expected_result = _ReadRowsRequestPB(table_name=table_name)
+    row_range = RowRange(start_key_closed=start_key, end_key_closed=end_key)
+    expected_result.rows.row_ranges.append(row_range)
+    assert result == expected_result
+
+
+def test__create_row_request_with_filter():
+    from google.cloud.bigtable.table import _create_row_request
+    from google.cloud.bigtable.row_filters import RowSampleFilter
+
+    table_name = "table_name"
+    row_filter = RowSampleFilter(0.33)
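+    # The filter is serialized to its protobuf form via to_pb() and
+    # attached to the request's filter field.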
+    result = _create_row_request(table_name, filter_=row_filter)
+    expected_result = _ReadRowsRequestPB(
+        table_name=table_name, filter=row_filter.to_pb()
+    )
+    assert result == expected_result
+
+
+def test__create_row_request_with_limit():
+    from google.cloud.bigtable.table import _create_row_request
+
+    table_name = "table_name"
+    limit = 1337
+    result = _create_row_request(table_name, limit=limit)
+    expected_result = _ReadRowsRequestPB(table_name=table_name, rows_limit=limit)
+    assert result == expected_result
+
+
+def test__create_row_request_with_row_set():
+    from google.cloud.bigtable.table import _create_row_request
+    from google.cloud.bigtable.row_set import RowSet
+
+    table_name = "table_name"
+    row_set = RowSet()
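+    # An empty RowSet adds no row_keys or row_ranges, so the request
+    # matches one built from the table name alone.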
+    result = _create_row_request(table_name, row_set=row_set)
+    expected_result = _ReadRowsRequestPB(table_name=table_name)
+    assert result == expected_result
+
+
+def test__create_row_request_with_app_profile_id():
+    from google.cloud.bigtable.table import _create_row_request
+
+    table_name = "table_name"
+    limit = 1337
+    app_profile_id = "app-profile-id"
+    result = _create_row_request(table_name, limit=limit, app_profile_id=app_profile_id)
+    expected_result = _ReadRowsRequestPB(
+        table_name=table_name, rows_limit=limit, app_profile_id=app_profile_id
+    )
+    assert result == expected_result

 def _ReadRowsRequestPB(*args, **kw):
@@ -2169,90 +2011,83 @@ def _ReadRowsRequestPB(*args, **kw):
     return messages_v2_pb2.ReadRowsRequest(*args, **kw)

-class Test_ClusterState(unittest.TestCase):
-    def test___eq__(self):
-        from google.cloud.bigtable.enums import Table as enum_table
-        from google.cloud.bigtable.table import ClusterState
-
-        READY = enum_table.ReplicationState.READY
-        state1 = ClusterState(READY)
-        state2 = ClusterState(READY)
-        self.assertEqual(state1, state2)
-
-    def test___eq__type_differ(self):
-        from google.cloud.bigtable.enums import Table as enum_table
-        from google.cloud.bigtable.table import ClusterState
-
-        READY = enum_table.ReplicationState.READY
-        state1 = ClusterState(READY)
-        state2 = object()
-        self.assertNotEqual(state1, state2)
-
-    def test___ne__same_value(self):
-        from google.cloud.bigtable.enums import Table as enum_table
-        from google.cloud.bigtable.table import ClusterState
-
-        READY = enum_table.ReplicationState.READY
-        state1 = ClusterState(READY)
-        state2 = ClusterState(READY)
-        comparison_val = state1 != state2
-        self.assertFalse(comparison_val)
-
-    def test___ne__(self):
-        from google.cloud.bigtable.enums import Table as enum_table
-        from google.cloud.bigtable.table import ClusterState
-
-        READY = enum_table.ReplicationState.READY
-        INITIALIZING = enum_table.ReplicationState.INITIALIZING
-        state1 = ClusterState(READY)
-        state2 = ClusterState(INITIALIZING)
-        self.assertNotEqual(state1, state2)
-
-    def test__repr__(self):
-        from google.cloud.bigtable.enums import Table as enum_table
-        from google.cloud.bigtable.table import ClusterState
-
-        STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN
-        INITIALIZING = enum_table.ReplicationState.INITIALIZING
-        PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
-        UNPLANNED_MAINTENANCE = enum_table.ReplicationState.UNPLANNED_MAINTENANCE
-        READY = enum_table.ReplicationState.READY
-
-        replication_dict = {
-            STATE_NOT_KNOWN: "STATE_NOT_KNOWN",
-            INITIALIZING: "INITIALIZING",
-            PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE",
-            UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE",
-            READY: "READY",
-        }
+def test_cluster_state___eq__():
+    from google.cloud.bigtable.enums import Table as enum_table
+    from google.cloud.bigtable.table import ClusterState

-        self.assertEqual(
-            str(ClusterState(STATE_NOT_KNOWN)), replication_dict[STATE_NOT_KNOWN]
-        )
-        self.assertEqual(
-            str(ClusterState(INITIALIZING)), replication_dict[INITIALIZING]
-        )
-        self.assertEqual(
-            str(ClusterState(PLANNED_MAINTENANCE)),
-            replication_dict[PLANNED_MAINTENANCE],
-        )
-        self.assertEqual(
-            str(ClusterState(UNPLANNED_MAINTENANCE)),
-            replication_dict[UNPLANNED_MAINTENANCE],
-        )
-        self.assertEqual(str(ClusterState(READY)), replication_dict[READY])
+    READY = enum_table.ReplicationState.READY
+    state1 = ClusterState(READY)
+    state2 = ClusterState(READY)
+    assert state1 == state2

-        self.assertEqual(
-            ClusterState(STATE_NOT_KNOWN).replication_state, STATE_NOT_KNOWN
-        )
-        self.assertEqual(ClusterState(INITIALIZING).replication_state, INITIALIZING)
-        self.assertEqual(
-            ClusterState(PLANNED_MAINTENANCE).replication_state, PLANNED_MAINTENANCE
-        )
-        self.assertEqual(
-            ClusterState(UNPLANNED_MAINTENANCE).replication_state, UNPLANNED_MAINTENANCE
-        )
-        self.assertEqual(ClusterState(READY).replication_state, READY)
+
+def test_cluster_state___eq__type_differ():
+    from google.cloud.bigtable.enums import Table as enum_table
+    from google.cloud.bigtable.table import ClusterState
+
+    READY = enum_table.ReplicationState.READY
+    state1 = ClusterState(READY)
+    state2 = object()
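+    # Use == (rather than !=) so that __eq__ itself is exercised
+    # against an unrelated type; the two must compare unequal.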
+    assert not (state1 == state2)
+
+
+def test_cluster_state___ne__same_value():
+    from google.cloud.bigtable.enums import Table as enum_table
+    from google.cloud.bigtable.table import ClusterState
+
+    READY = enum_table.ReplicationState.READY
+    state1 = ClusterState(READY)
+    state2 = ClusterState(READY)
+    assert not (state1 != state2)
+
+
+def test_cluster_state___ne__():
+    from google.cloud.bigtable.enums import Table as enum_table
+    from google.cloud.bigtable.table import ClusterState
+
+    READY = enum_table.ReplicationState.READY
+    INITIALIZING = enum_table.ReplicationState.INITIALIZING
+    state1 = ClusterState(READY)
+    state2 = ClusterState(INITIALIZING)
+    assert state1 != state2
+
+
+def test_cluster_state__repr__():
+    from google.cloud.bigtable.enums import Table as enum_table
+    from google.cloud.bigtable.table import ClusterState
+
+    STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN
+    INITIALIZING = enum_table.ReplicationState.INITIALIZING
+    PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
+    UNPLANNED_MAINTENANCE = enum_table.ReplicationState.UNPLANNED_MAINTENANCE
+    READY = enum_table.ReplicationState.READY
+
+    replication_dict = {
+        STATE_NOT_KNOWN: "STATE_NOT_KNOWN",
+        INITIALIZING: "INITIALIZING",
+        PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE",
+        UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE",
+        READY: "READY",
+    }
+
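+    # str() of a ClusterState should render the bare enum member name,
+    # and replication_state should round-trip the constructor argument.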
+    assert str(ClusterState(STATE_NOT_KNOWN)) == replication_dict[STATE_NOT_KNOWN]
+    assert str(ClusterState(INITIALIZING)) == replication_dict[INITIALIZING]
+    assert (
+        str(ClusterState(PLANNED_MAINTENANCE)) == replication_dict[PLANNED_MAINTENANCE]
+    )
+    assert (
+        str(ClusterState(UNPLANNED_MAINTENANCE))
+        == replication_dict[UNPLANNED_MAINTENANCE]
+    )
+    assert str(ClusterState(READY)) == replication_dict[READY]
+
+    assert ClusterState(STATE_NOT_KNOWN).replication_state == STATE_NOT_KNOWN
+    assert ClusterState(INITIALIZING).replication_state == INITIALIZING
+    assert ClusterState(PLANNED_MAINTENANCE).replication_state == PLANNED_MAINTENANCE
+    assert (
+        ClusterState(UNPLANNED_MAINTENANCE).replication_state == UNPLANNED_MAINTENANCE
+    )
+    assert ClusterState(READY).replication_state == READY

 def _ReadRowsResponseCellChunkPB(*args, **kw):